From cbe4cdc48486fbedede448aa59aa6a367efa1038 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 09:45:52 +0200 Subject: Merging upstream version 2.1.7. Signed-off-by: Daniel Baumann --- ChangeLog | 433 +++ GNUmakefile | 8 +- INSTALL.md | 8 +- Makefile.am | 71 +- agents/Makefile.am | 4 +- agents/alerts/Makefile.am | 4 +- agents/ocf/HealthCPU.in | 23 +- agents/ocf/HealthSMART.in | 16 +- agents/ocf/Makefile.am | 11 +- agents/ocf/ifspeed.in | 20 +- agents/ocf/ping.in | 13 +- agents/stonith/Makefile.am | 3 +- configure.ac | 793 ++--- cts/Makefile.am | 61 +- cts/README.md | 110 +- cts/benchmark/Makefile.am | 5 +- cts/benchmark/clubench.in | 2 +- cts/cli/crm_verify_invalid_bz.xml | 72 + cts/cli/crm_verify_invalid_no_stonith.xml | 12 + cts/cli/regression.daemons.exp | 10 + cts/cli/regression.error_codes.exp | 12 + cts/cli/regression.rules.exp | 12 + cts/cli/regression.tools.exp | 348 ++- cts/cluster_test.in | 175 ++ cts/cts-attrd.in | 17 +- cts/cts-cli.in | 56 +- cts/cts-lab.in | 136 + cts/cts-log-watcher.in | 84 + cts/cts-scheduler.in | 88 + cts/cts.in | 404 +++ cts/lab/CIB.py | 518 ---- cts/lab/CM_corosync.py | 60 - cts/lab/CTSaudits.py | 879 ------ cts/lab/CTSlab.py.in | 135 - cts/lab/CTSscenarios.py | 563 ---- cts/lab/CTStests.py | 3178 -------------------- cts/lab/ClusterManager.py | 940 ------ cts/lab/Makefile.am | 31 - cts/lab/OCFIPraTest.py.in | 173 -- cts/lab/__init__.py | 15 - cts/lab/cib_xml.py | 319 -- cts/lab/cluster_test.in | 175 -- cts/lab/cts-log-watcher.in | 84 - cts/lab/cts.in | 262 -- cts/lxc_autogen.sh.in | 545 ---- cts/scheduler/Makefile.am | 8 +- cts/scheduler/dot/bug-lf-2422.dot | 3 + cts/scheduler/dot/bundle-interleave-start.dot | 46 +- cts/scheduler/dot/bundle-nested-colocation.dot | 1 + cts/scheduler/dot/bundle-order-startup-clone-2.dot | 2 + cts/scheduler/dot/bundle-probe-remotes.dot | 10 + .../dot/bundle-promoted-anticolocation-1.dot | 7 + .../dot/bundle-promoted-anticolocation-2.dot | 7 + .../dot/bundle-promoted-anticolocation-3.dot | 32 + .../dot/bundle-promoted-anticolocation-4.dot | 32 + .../dot/bundle-promoted-anticolocation-5.dot | 32 + .../dot/bundle-promoted-anticolocation-6.dot | 32 + cts/scheduler/dot/bundle-promoted-colocation-1.dot | 7 + cts/scheduler/dot/bundle-promoted-colocation-2.dot | 7 + cts/scheduler/dot/bundle-promoted-colocation-3.dot | 32 + cts/scheduler/dot/bundle-promoted-colocation-4.dot | 32 + cts/scheduler/dot/bundle-promoted-colocation-5.dot | 32 + cts/scheduler/dot/bundle-promoted-colocation-6.dot | 32 + cts/scheduler/dot/bundle-promoted-location-1.dot | 2 + cts/scheduler/dot/bundle-promoted-location-2.dot | 75 + cts/scheduler/dot/bundle-promoted-location-3.dot | 2 + cts/scheduler/dot/bundle-promoted-location-4.dot | 2 + cts/scheduler/dot/bundle-promoted-location-5.dot | 2 + cts/scheduler/dot/bundle-promoted-location-6.dot | 37 + cts/scheduler/dot/bundle-replicas-change.dot | 1 + cts/scheduler/dot/cancel-behind-moving-remote.dot | 121 +- cts/scheduler/dot/clone-order-16instances.dot | 93 +- cts/scheduler/dot/clone-recover-no-shuffle-1.dot | 10 + cts/scheduler/dot/clone-recover-no-shuffle-10.dot | 10 + cts/scheduler/dot/clone-recover-no-shuffle-11.dot | 21 + cts/scheduler/dot/clone-recover-no-shuffle-12.dot | 35 + cts/scheduler/dot/clone-recover-no-shuffle-2.dot | 21 + cts/scheduler/dot/clone-recover-no-shuffle-3.dot | 32 + cts/scheduler/dot/clone-recover-no-shuffle-4.dot | 10 + cts/scheduler/dot/clone-recover-no-shuffle-5.dot | 21 + cts/scheduler/dot/clone-recover-no-shuffle-6.dot | 32 + 
cts/scheduler/dot/clone-recover-no-shuffle-7.dot | 30 + cts/scheduler/dot/clone-recover-no-shuffle-8.dot | 63 + cts/scheduler/dot/clone-recover-no-shuffle-9.dot | 69 + .../dot/coloc-with-inner-group-member.dot | 40 + cts/scheduler/dot/group-anticolocation-2.dot | 29 + cts/scheduler/dot/group-anticolocation-3.dot | 8 + cts/scheduler/dot/group-anticolocation-4.dot | 29 + cts/scheduler/dot/group-anticolocation-5.dot | 29 + cts/scheduler/dot/group-anticolocation.dot | 27 + cts/scheduler/dot/guest-host-not-fenceable.dot | 4 + cts/scheduler/dot/inc4.dot | 2 + cts/scheduler/dot/node-pending-timeout.dot | 7 + cts/scheduler/dot/order-clone.dot | 3 + cts/scheduler/dot/pending-node-no-uname.dot | 7 + cts/scheduler/dot/promoted-ordering.dot | 28 +- cts/scheduler/dot/promoted-probed-score.dot | 292 +- cts/scheduler/dot/timeout-by-node.dot | 40 + cts/scheduler/dot/unfence-definition.dot | 4 + cts/scheduler/dot/unfence-parameters.dot | 4 + cts/scheduler/dot/utilization-complex.dot | 1 + cts/scheduler/exp/bug-1822.exp | 4 +- cts/scheduler/exp/bug-lf-2422.exp | 9 + cts/scheduler/exp/bundle-interleave-start.exp | 608 ++-- cts/scheduler/exp/bundle-nested-colocation.exp | 3 + cts/scheduler/exp/bundle-order-fencing.exp | 242 +- cts/scheduler/exp/bundle-order-startup-clone-2.exp | 6 + cts/scheduler/exp/bundle-order-stop-on-remote.exp | 134 +- cts/scheduler/exp/bundle-probe-remotes.exp | 30 + .../exp/bundle-promoted-anticolocation-1.exp | 37 + .../exp/bundle-promoted-anticolocation-2.exp | 37 + .../exp/bundle-promoted-anticolocation-3.exp | 179 ++ .../exp/bundle-promoted-anticolocation-4.exp | 179 ++ .../exp/bundle-promoted-anticolocation-5.exp | 179 ++ .../exp/bundle-promoted-anticolocation-6.exp | 179 ++ cts/scheduler/exp/bundle-promoted-colocation-1.exp | 37 + cts/scheduler/exp/bundle-promoted-colocation-2.exp | 37 + cts/scheduler/exp/bundle-promoted-colocation-3.exp | 179 ++ cts/scheduler/exp/bundle-promoted-colocation-4.exp | 179 ++ cts/scheduler/exp/bundle-promoted-colocation-5.exp | 179 ++ cts/scheduler/exp/bundle-promoted-colocation-6.exp | 179 ++ cts/scheduler/exp/bundle-promoted-location-1.exp | 1 + cts/scheduler/exp/bundle-promoted-location-2.exp | 328 ++ cts/scheduler/exp/bundle-promoted-location-3.exp | 1 + cts/scheduler/exp/bundle-promoted-location-4.exp | 1 + cts/scheduler/exp/bundle-promoted-location-5.exp | 1 + cts/scheduler/exp/bundle-promoted-location-6.exp | 136 + cts/scheduler/exp/bundle-replicas-change.exp | 3 + cts/scheduler/exp/cancel-behind-moving-remote.exp | 760 ++--- cts/scheduler/exp/clone-anon-failcount.exp | 2 +- cts/scheduler/exp/clone-order-16instances.exp | 234 ++ cts/scheduler/exp/clone-recover-no-shuffle-1.exp | 51 + cts/scheduler/exp/clone-recover-no-shuffle-10.exp | 51 + cts/scheduler/exp/clone-recover-no-shuffle-11.exp | 110 + cts/scheduler/exp/clone-recover-no-shuffle-12.exp | 187 ++ cts/scheduler/exp/clone-recover-no-shuffle-2.exp | 110 + cts/scheduler/exp/clone-recover-no-shuffle-3.exp | 171 ++ cts/scheduler/exp/clone-recover-no-shuffle-4.exp | 51 + cts/scheduler/exp/clone-recover-no-shuffle-5.exp | 110 + cts/scheduler/exp/clone-recover-no-shuffle-6.exp | 171 ++ cts/scheduler/exp/clone-recover-no-shuffle-7.exp | 162 + cts/scheduler/exp/clone-recover-no-shuffle-8.exp | 338 +++ cts/scheduler/exp/clone-recover-no-shuffle-9.exp | 364 +++ .../exp/coloc-with-inner-group-member.exp | 202 ++ cts/scheduler/exp/group-anticolocation-2.exp | 148 + cts/scheduler/exp/group-anticolocation-3.exp | 38 + cts/scheduler/exp/group-anticolocation-4.exp | 148 + 
cts/scheduler/exp/group-anticolocation-5.exp | 148 + cts/scheduler/exp/group-anticolocation.exp | 204 +- cts/scheduler/exp/inc4.exp | 6 + .../exp/no-promote-on-unrunnable-guest.exp | 110 +- cts/scheduler/exp/node-pending-timeout.exp | 38 + cts/scheduler/exp/pending-node-no-uname.exp | 11 + cts/scheduler/exp/promoted-failed-demote-2.exp | 2 +- cts/scheduler/exp/promoted-failed-demote.exp | 2 +- cts/scheduler/exp/promoted-ordering.exp | 40 +- cts/scheduler/exp/promoted-probed-score.exp | 336 +-- cts/scheduler/exp/shutdown-lock-expiration.exp | 2 +- cts/scheduler/exp/timeout-by-node.exp | 228 ++ cts/scheduler/exp/unfence-definition.exp | 12 + cts/scheduler/exp/unfence-parameters.exp | 12 + cts/scheduler/scores/594.scores | 3 + .../scores/a-promote-then-b-migrate.scores | 2 + cts/scheduler/scores/asymmetric.scores | 1 - cts/scheduler/scores/bug-1822.scores | 2 +- .../scores/bug-5014-CLONE-A-stop-B-started.scores | 1 + cts/scheduler/scores/bug-5143-ms-shuffle.scores | 12 +- .../scores/bug-5186-partial-migrate.scores | 6 +- cts/scheduler/scores/bug-cl-5168.scores | 2 +- cts/scheduler/scores/bug-lf-2106.scores | 36 +- cts/scheduler/scores/bug-lf-2153.scores | 6 +- cts/scheduler/scores/bug-lf-2171.scores | 4 + cts/scheduler/scores/bug-lf-2422.scores | 16 + cts/scheduler/scores/bug-lf-2453.scores | 4 + cts/scheduler/scores/bug-lf-2551.scores | 42 +- cts/scheduler/scores/bug-lf-2574.scores | 7 +- cts/scheduler/scores/bug-lf-2581.scores | 4 +- cts/scheduler/scores/bug-lf-2619.scores | 2 +- cts/scheduler/scores/bug-n-387749.scores | 9 +- cts/scheduler/scores/bug-suse-707150.scores | 52 + .../scores/bundle-connection-with-container.scores | 144 +- .../scores/bundle-interleave-promote.scores | 186 +- .../scores/bundle-interleave-start.scores | 196 +- .../scores/bundle-nested-colocation.scores | 228 +- cts/scheduler/scores/bundle-order-fencing.scores | 246 +- .../scores/bundle-order-partial-start-2.scores | 52 +- .../scores/bundle-order-partial-start.scores | 52 +- .../scores/bundle-order-partial-stop.scores | 50 +- .../scores/bundle-order-startup-clone-2.scores | 184 +- .../scores/bundle-order-startup-clone.scores | 88 +- cts/scheduler/scores/bundle-order-startup.scores | 50 +- .../scores/bundle-order-stop-clone.scores | 188 +- .../scores/bundle-order-stop-on-remote.scores | 612 ++-- cts/scheduler/scores/bundle-order-stop.scores | 50 +- cts/scheduler/scores/bundle-probe-order-1.scores | 60 +- cts/scheduler/scores/bundle-probe-order-2.scores | 60 +- cts/scheduler/scores/bundle-probe-order-3.scores | 60 +- cts/scheduler/scores/bundle-probe-remotes.scores | 192 +- .../scores/bundle-promoted-anticolocation-1.scores | 70 + .../scores/bundle-promoted-anticolocation-2.scores | 70 + .../scores/bundle-promoted-anticolocation-3.scores | 70 + .../scores/bundle-promoted-anticolocation-4.scores | 70 + .../scores/bundle-promoted-anticolocation-5.scores | 160 + .../scores/bundle-promoted-anticolocation-6.scores | 160 + .../scores/bundle-promoted-colocation-1.scores | 70 + .../scores/bundle-promoted-colocation-2.scores | 70 + .../scores/bundle-promoted-colocation-3.scores | 70 + .../scores/bundle-promoted-colocation-4.scores | 70 + .../scores/bundle-promoted-colocation-5.scores | 160 + .../scores/bundle-promoted-colocation-6.scores | 160 + .../scores/bundle-promoted-location-1.scores | 70 + .../scores/bundle-promoted-location-2.scores | 67 + .../scores/bundle-promoted-location-3.scores | 67 + .../scores/bundle-promoted-location-4.scores | 67 + .../scores/bundle-promoted-location-5.scores | 67 + 
.../scores/bundle-promoted-location-6.scores | 67 + cts/scheduler/scores/bundle-replicas-change.scores | 34 +- .../scores/cancel-behind-moving-remote.scores | 1005 ++++--- cts/scheduler/scores/clone-anon-failcount.scores | 96 +- .../scores/clone-fail-block-colocation.scores | 2 + cts/scheduler/scores/clone-max-zero.scores | 8 + .../scores/clone-recover-no-shuffle-1.scores | 25 + .../scores/clone-recover-no-shuffle-10.scores | 31 + .../scores/clone-recover-no-shuffle-11.scores | 82 + .../scores/clone-recover-no-shuffle-12.scores | 67 + .../scores/clone-recover-no-shuffle-2.scores | 79 + .../scores/clone-recover-no-shuffle-3.scores | 64 + .../scores/clone-recover-no-shuffle-4.scores | 31 + .../scores/clone-recover-no-shuffle-5.scores | 109 + .../scores/clone-recover-no-shuffle-6.scores | 70 + .../scores/clone-recover-no-shuffle-7.scores | 34 + .../scores/clone-recover-no-shuffle-8.scores | 82 + .../scores/clone-recover-no-shuffle-9.scores | 67 + cts/scheduler/scores/cloned-group-stop.scores | 4 + .../scores/coloc-clone-stays-active.scores | 22 +- .../scores/coloc-with-inner-group-member.scores | 46 + .../scores/colocate-primitive-with-clone.scores | 48 +- cts/scheduler/scores/colocation-influence.scores | 264 +- cts/scheduler/scores/complex_enforce_colo.scores | 9 + cts/scheduler/scores/enforce-colo1.scores | 9 + cts/scheduler/scores/group-anticolocation-2.scores | 23 + cts/scheduler/scores/group-anticolocation-3.scores | 23 + cts/scheduler/scores/group-anticolocation-4.scores | 23 + cts/scheduler/scores/group-anticolocation-5.scores | 34 + cts/scheduler/scores/group-anticolocation.scores | 6 +- cts/scheduler/scores/group-dependents.scores | 10 +- .../scores/guest-host-not-fenceable.scores | 122 +- cts/scheduler/scores/load-stopped-loop-2.scores | 4 +- cts/scheduler/scores/load-stopped-loop.scores | 30 + cts/scheduler/scores/migrate-begin.scores | 2 + cts/scheduler/scores/migrate-fail-2.scores | 2 + cts/scheduler/scores/migrate-fail-3.scores | 4 +- cts/scheduler/scores/migrate-fail-4.scores | 2 + cts/scheduler/scores/migrate-fail-5.scores | 2 + cts/scheduler/scores/migrate-fail-6.scores | 2 + cts/scheduler/scores/migrate-fail-7.scores | 4 +- cts/scheduler/scores/migrate-fail-8.scores | 2 + cts/scheduler/scores/migrate-fail-9.scores | 2 + cts/scheduler/scores/migrate-partial-1.scores | 4 +- cts/scheduler/scores/migrate-partial-2.scores | 2 + cts/scheduler/scores/migrate-partial-3.scores | 3 + cts/scheduler/scores/migrate-start-complex.scores | 18 +- cts/scheduler/scores/migrate-start.scores | 12 +- .../scores/migrate-stop-start-complex.scores | 4 +- cts/scheduler/scores/migrate-success.scores | 4 +- cts/scheduler/scores/nested-remote-recovery.scores | 648 ++-- .../scores/no-promote-on-unrunnable-guest.scores | 254 +- cts/scheduler/scores/node-pending-timeout.scores | 3 + cts/scheduler/scores/notifs-for-unrunnable.scores | 230 +- .../scores/notify-behind-stopping-remote.scores | 60 +- cts/scheduler/scores/novell-239087.scores | 4 +- cts/scheduler/scores/on_fail_demote1.scores | 166 +- cts/scheduler/scores/on_fail_demote4.scores | 166 +- cts/scheduler/scores/order-expired-failure.scores | 376 +-- cts/scheduler/scores/params-6.scores | 23 +- cts/scheduler/scores/pending-node-no-uname.scores | 3 + cts/scheduler/scores/probe-2.scores | 2 +- cts/scheduler/scores/promoted-13.scores | 2 +- .../scores/promoted-asymmetrical-order.scores | 4 + cts/scheduler/scores/promoted-demote.scores | 12 +- .../scores/promoted-failed-demote-2.scores | 6 + cts/scheduler/scores/promoted-failed-demote.scores | 6 + 
cts/scheduler/scores/promoted-move.scores | 2 + cts/scheduler/scores/promoted-ordering.scores | 10 +- .../scores/promoted-partially-demoted-group.scores | 10 +- cts/scheduler/scores/promoted-probed-score.scores | 92 +- .../scores/remote-connection-shutdown.scores | 974 +++--- cts/scheduler/scores/remote-fence-unclean-3.scores | 348 +-- cts/scheduler/scores/route-remote-notify.scores | 122 +- cts/scheduler/scores/rsc-sets-clone-1.scores | 18 +- .../scores/start-then-stop-with-unfence.scores | 6 +- cts/scheduler/scores/stop-all-resources.scores | 56 +- cts/scheduler/scores/timeout-by-node.scores | 61 + cts/scheduler/scores/unrunnable-2.scores | 2 +- cts/scheduler/scores/utilization-complex.scores | 176 +- cts/scheduler/scores/utilization-order2.scores | 2 + cts/scheduler/scores/utilization-order4.scores | 6 +- cts/scheduler/scores/utilization-shuffle.scores | 48 +- cts/scheduler/scores/year-2038.scores | 376 +-- .../11-a-then-bm-b-move-a-clone-starting.summary | 2 +- .../summary/5-am-then-bm-a-not-migratable.summary | 2 +- .../7-migrate-group-one-unmigratable.summary | 2 +- .../summary/bundle-interleave-start.summary | 70 +- cts/scheduler/summary/bundle-order-fencing.summary | 4 +- .../summary/bundle-order-stop-on-remote.summary | 6 +- .../bundle-promoted-anticolocation-1.summary | 33 + .../bundle-promoted-anticolocation-2.summary | 33 + .../bundle-promoted-anticolocation-3.summary | 45 + .../bundle-promoted-anticolocation-4.summary | 45 + .../bundle-promoted-anticolocation-5.summary | 51 + .../bundle-promoted-anticolocation-6.summary | 51 + .../summary/bundle-promoted-colocation-1.summary | 33 + .../summary/bundle-promoted-colocation-2.summary | 33 + .../summary/bundle-promoted-colocation-3.summary | 45 + .../summary/bundle-promoted-colocation-4.summary | 45 + .../summary/bundle-promoted-colocation-5.summary | 51 + .../summary/bundle-promoted-colocation-6.summary | 51 + .../summary/bundle-promoted-location-1.summary | 27 + .../summary/bundle-promoted-location-2.summary | 54 + .../summary/bundle-promoted-location-3.summary | 27 + .../summary/bundle-promoted-location-4.summary | 27 + .../summary/bundle-promoted-location-5.summary | 27 + .../summary/bundle-promoted-location-6.summary | 40 + .../summary/cancel-behind-moving-remote.summary | 78 +- .../summary/clone-recover-no-shuffle-1.summary | 29 + .../summary/clone-recover-no-shuffle-10.summary | 29 + .../summary/clone-recover-no-shuffle-11.summary | 34 + .../summary/clone-recover-no-shuffle-12.summary | 43 + .../summary/clone-recover-no-shuffle-2.summary | 32 + .../summary/clone-recover-no-shuffle-3.summary | 42 + .../summary/clone-recover-no-shuffle-4.summary | 29 + .../summary/clone-recover-no-shuffle-5.summary | 32 + .../summary/clone-recover-no-shuffle-6.summary | 42 + .../summary/clone-recover-no-shuffle-7.summary | 38 + .../summary/clone-recover-no-shuffle-8.summary | 52 + .../summary/clone-recover-no-shuffle-9.summary | 56 + .../summary/coloc-with-inner-group-member.summary | 45 + .../summary/group-anticolocation-2.summary | 41 + .../summary/group-anticolocation-3.summary | 33 + .../summary/group-anticolocation-4.summary | 41 + .../summary/group-anticolocation-5.summary | 41 + cts/scheduler/summary/group-anticolocation.summary | 16 +- cts/scheduler/summary/migrate-fencing.summary | 2 +- .../summary/no-promote-on-unrunnable-guest.summary | 2 +- cts/scheduler/summary/node-pending-timeout.summary | 26 + .../summary/pending-node-no-uname.summary | 23 + cts/scheduler/summary/promoted-ordering.summary | 24 +- 
.../summary/promoted-probed-score.summary | 124 +- cts/scheduler/summary/timeout-by-node.summary | 43 + cts/scheduler/summary/unfence-definition.summary | 2 +- cts/scheduler/summary/unfence-parameters.summary | 2 +- cts/scheduler/xml/anon-instance-pending.xml | 2 +- cts/scheduler/xml/bundle-interleave-start.xml | 3 +- .../xml/bundle-promoted-anticolocation-1.xml | 238 ++ .../xml/bundle-promoted-anticolocation-2.xml | 238 ++ .../xml/bundle-promoted-anticolocation-3.xml | 238 ++ .../xml/bundle-promoted-anticolocation-4.xml | 238 ++ .../xml/bundle-promoted-anticolocation-5.xml | 368 +++ .../xml/bundle-promoted-anticolocation-6.xml | 368 +++ cts/scheduler/xml/bundle-promoted-colocation-1.xml | 237 ++ cts/scheduler/xml/bundle-promoted-colocation-2.xml | 237 ++ cts/scheduler/xml/bundle-promoted-colocation-3.xml | 237 ++ cts/scheduler/xml/bundle-promoted-colocation-4.xml | 237 ++ cts/scheduler/xml/bundle-promoted-colocation-5.xml | 367 +++ cts/scheduler/xml/bundle-promoted-colocation-6.xml | 367 +++ cts/scheduler/xml/bundle-promoted-location-1.xml | 221 ++ cts/scheduler/xml/bundle-promoted-location-2.xml | 218 ++ cts/scheduler/xml/bundle-promoted-location-3.xml | 225 ++ cts/scheduler/xml/bundle-promoted-location-4.xml | 225 ++ cts/scheduler/xml/bundle-promoted-location-5.xml | 231 ++ cts/scheduler/xml/bundle-promoted-location-6.xml | 224 ++ cts/scheduler/xml/cancel-behind-moving-remote.xml | 14 + cts/scheduler/xml/clone-recover-no-shuffle-1.xml | 113 + cts/scheduler/xml/clone-recover-no-shuffle-10.xml | 120 + cts/scheduler/xml/clone-recover-no-shuffle-11.xml | 153 + cts/scheduler/xml/clone-recover-no-shuffle-12.xml | 186 ++ cts/scheduler/xml/clone-recover-no-shuffle-2.xml | 141 + cts/scheduler/xml/clone-recover-no-shuffle-3.xml | 180 ++ cts/scheduler/xml/clone-recover-no-shuffle-4.xml | 115 + cts/scheduler/xml/clone-recover-no-shuffle-5.xml | 143 + cts/scheduler/xml/clone-recover-no-shuffle-6.xml | 182 ++ cts/scheduler/xml/clone-recover-no-shuffle-7.xml | 120 + cts/scheduler/xml/clone-recover-no-shuffle-8.xml | 153 + cts/scheduler/xml/clone-recover-no-shuffle-9.xml | 186 ++ .../xml/coloc-with-inner-group-member.xml | 258 ++ cts/scheduler/xml/group-anticolocation-2.xml | 166 + cts/scheduler/xml/group-anticolocation-3.xml | 165 + cts/scheduler/xml/group-anticolocation-4.xml | 167 + cts/scheduler/xml/group-anticolocation-5.xml | 188 ++ cts/scheduler/xml/group-anticolocation.xml | 14 +- cts/scheduler/xml/node-pending-timeout.xml | 27 + cts/scheduler/xml/pending-node-no-uname.xml | 26 + cts/scheduler/xml/promoted-ordering.xml | 26 +- cts/scheduler/xml/promoted-probed-score.xml | 4 +- cts/scheduler/xml/timeout-by-node.xml | 139 + cts/support/Makefile.am | 6 +- daemons/Makefile.am | 10 +- daemons/attrd/Makefile.am | 43 +- daemons/attrd/attrd_alerts.c | 21 +- daemons/attrd/attrd_attributes.c | 46 +- daemons/attrd/attrd_cib.c | 464 ++- daemons/attrd/attrd_corosync.c | 21 +- daemons/attrd/attrd_elections.c | 14 +- daemons/attrd/attrd_ipc.c | 25 +- daemons/attrd/attrd_messages.c | 34 + daemons/attrd/attrd_sync.c | 4 +- daemons/attrd/attrd_utils.c | 59 +- daemons/attrd/pacemaker-attrd.c | 140 +- daemons/attrd/pacemaker-attrd.h | 17 +- daemons/based/Makefile.am | 42 +- daemons/based/based_callbacks.c | 854 +++--- daemons/based/based_common.c | 352 --- daemons/based/based_io.c | 22 +- daemons/based/based_messages.c | 125 +- daemons/based/based_notify.c | 99 +- daemons/based/based_operation.c | 59 + daemons/based/based_remote.c | 29 +- daemons/based/based_transaction.c | 167 + daemons/based/based_transaction.h 
| 24 + daemons/based/pacemaker-based.c | 17 +- daemons/based/pacemaker-based.h | 45 +- daemons/controld/Makefile.am | 36 +- daemons/controld/controld_callbacks.c | 20 +- daemons/controld/controld_cib.c | 298 +- daemons/controld/controld_cib.h | 12 +- daemons/controld/controld_control.c | 37 +- daemons/controld/controld_corosync.c | 8 +- daemons/controld/controld_election.c | 7 - daemons/controld/controld_execd.c | 92 +- daemons/controld/controld_execd_state.c | 15 +- daemons/controld/controld_fencing.c | 87 +- daemons/controld/controld_fencing.h | 2 +- daemons/controld/controld_fsa.c | 7 - daemons/controld/controld_globals.h | 6 +- daemons/controld/controld_join_client.c | 36 +- daemons/controld/controld_join_dc.c | 133 +- daemons/controld/controld_lrm.h | 5 - daemons/controld/controld_membership.c | 40 +- daemons/controld/controld_messages.c | 197 +- daemons/controld/controld_metadata.c | 6 +- daemons/controld/controld_remote_ra.c | 99 +- daemons/controld/controld_schedulerd.c | 23 +- daemons/controld/controld_te_actions.c | 25 +- daemons/controld/controld_te_callbacks.c | 6 +- daemons/controld/controld_te_events.c | 12 +- daemons/controld/controld_te_utils.c | 175 +- daemons/controld/controld_throttle.c | 6 +- daemons/controld/controld_transition.c | 13 +- daemons/controld/controld_transition.h | 2 + daemons/controld/controld_utils.c | 2 +- daemons/controld/pacemaker-controld.c | 2 +- daemons/controld/pacemaker-controld.h | 3 + daemons/execd/Makefile.am | 43 +- daemons/execd/cts-exec-helper.c | 34 +- daemons/execd/execd_commands.c | 53 +- daemons/execd/pacemaker-execd.c | 10 +- daemons/execd/remoted_pidone.c | 18 +- daemons/execd/remoted_tls.c | 42 +- daemons/fenced/Makefile.am | 33 +- daemons/fenced/cts-fence-helper.c | 43 +- daemons/fenced/fenced_cib.c | 734 +++++ daemons/fenced/fenced_commands.c | 128 +- daemons/fenced/fenced_remote.c | 61 +- daemons/fenced/fenced_scheduler.c | 225 ++ daemons/fenced/pacemaker-fenced.c | 929 +----- daemons/fenced/pacemaker-fenced.h | 31 +- daemons/pacemakerd/Makefile.am | 8 +- daemons/pacemakerd/pacemakerd.c | 26 +- daemons/pacemakerd/pacemakerd.h | 6 +- daemons/pacemakerd/pcmkd_corosync.c | 16 +- daemons/pacemakerd/pcmkd_messages.c | 2 +- daemons/pacemakerd/pcmkd_subdaemons.c | 27 +- daemons/schedulerd/Makefile.am | 22 +- daemons/schedulerd/pacemaker-schedulerd.h | 4 +- daemons/schedulerd/schedulerd_messages.c | 34 +- devel/Makefile.am | 56 +- doc/Makefile.am | 25 +- doc/abi-check.in | 10 +- doc/sphinx/Clusters_from_Scratch/apache.rst | 2 +- doc/sphinx/Clusters_from_Scratch/cluster-setup.rst | 8 +- doc/sphinx/Makefile.am | 44 +- .../Pacemaker_Administration/administrative.rst | 150 + doc/sphinx/Pacemaker_Administration/alerts.rst | 6 +- .../Pacemaker_Administration/configuring.rst | 109 +- doc/sphinx/Pacemaker_Administration/index.rst | 2 + doc/sphinx/Pacemaker_Administration/moving.rst | 305 ++ doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst | 14 +- doc/sphinx/Pacemaker_Development/c.rst | 14 +- doc/sphinx/Pacemaker_Development/components.rst | 52 +- doc/sphinx/Pacemaker_Development/helpers.rst | 5 +- doc/sphinx/Pacemaker_Explained/acls.rst | 18 +- .../Pacemaker_Explained/advanced-options.rst | 586 ---- .../Pacemaker_Explained/advanced-resources.rst | 1629 ---------- doc/sphinx/Pacemaker_Explained/cluster-options.rst | 921 ++++++ doc/sphinx/Pacemaker_Explained/collective.rst | 1637 ++++++++++ doc/sphinx/Pacemaker_Explained/constraints.rst | 65 +- doc/sphinx/Pacemaker_Explained/index.rst | 7 +- doc/sphinx/Pacemaker_Explained/local-options.rst | 515 
++++ doc/sphinx/Pacemaker_Explained/nodes.rst | 48 +- doc/sphinx/Pacemaker_Explained/operations.rst | 623 ++++ doc/sphinx/Pacemaker_Explained/options.rst | 622 ---- doc/sphinx/Pacemaker_Explained/resources.rst | 424 +-- .../Pacemaker_Explained/reusing-configuration.rst | 5 + doc/sphinx/Pacemaker_Explained/status.rst | 72 +- doc/sphinx/Pacemaker_Explained/utilization.rst | 38 +- doc/sphinx/Pacemaker_Remote/alternatives.rst | 9 +- doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst | 2 +- doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst | 2 +- doc/sphinx/conf.py.in | 10 + etc/Makefile.am | 7 +- etc/sysconfig/pacemaker.in | 68 +- include/Makefile.am | 22 +- include/crm/Makefile.am | 23 +- include/crm/cib/cib_types.h | 139 +- include/crm/cib/internal.h | 124 +- include/crm/cluster.h | 8 +- include/crm/cluster/Makefile.am | 5 +- include/crm/cluster/compat.h | 8 +- include/crm/cluster/internal.h | 8 +- include/crm/common/Makefile.am | 73 +- include/crm/common/action_relation_internal.h | 132 + include/crm/common/actions.h | 467 +++ include/crm/common/actions_internal.h | 57 + include/crm/common/alerts_internal.h | 3 +- include/crm/common/cib_internal.h | 23 + include/crm/common/clone_internal.h | 33 + include/crm/common/digests_internal.h | 33 + include/crm/common/failcounts_internal.h | 41 + include/crm/common/group_internal.h | 27 + include/crm/common/health_internal.h | 2 +- include/crm/common/internal.h | 23 +- include/crm/common/ipc.h | 6 +- include/crm/common/ipc_internal.h | 11 +- include/crm/common/logging.h | 8 +- include/crm/common/logging_compat.h | 4 + include/crm/common/logging_internal.h | 102 +- include/crm/common/nodes.h | 144 + include/crm/common/nvpair.h | 1 - include/crm/common/options_internal.h | 42 +- include/crm/common/output_internal.h | 5 + include/crm/common/remote_internal.h | 4 +- include/crm/common/resources.h | 502 ++++ include/crm/common/results.h | 4 +- include/crm/common/results_compat.h | 5 +- include/crm/common/results_internal.h | 3 + include/crm/common/roles.h | 62 + include/crm/common/roles_internal.h | 30 + include/crm/common/scheduler.h | 238 ++ include/crm/common/scheduler_internal.h | 67 + include/crm/common/scheduler_types.h | 39 + include/crm/common/tags.h | 35 + include/crm/common/tickets.h | 39 + include/crm/common/unittest_internal.h | 40 +- include/crm/common/util.h | 24 +- include/crm/common/util_compat.h | 9 +- include/crm/common/xml.h | 36 +- include/crm/common/xml_compat.h | 15 +- include/crm/common/xml_internal.h | 42 +- include/crm/compatibility.h | 24 +- include/crm/crm.h | 78 +- include/crm/crm_compat.h | 129 +- include/crm/lrmd.h | 69 +- include/crm/lrmd_events.h | 108 + include/crm/lrmd_internal.h | 1 + include/crm/msg_xml.h | 33 +- include/crm/msg_xml_compat.h | 53 +- include/crm/pengine/Makefile.am | 11 +- include/crm/pengine/common.h | 123 +- include/crm/pengine/common_compat.h | 35 +- include/crm/pengine/complex.h | 22 +- include/crm/pengine/internal.h | 615 ++-- include/crm/pengine/pe_types.h | 530 +--- include/crm/pengine/pe_types_compat.h | 221 +- include/crm/pengine/remote_internal.h | 23 +- include/crm/pengine/status.h | 47 +- include/crm/services_compat.h | 7 +- include/crm_internal.h | 5 + include/pacemaker-internal.h | 5 +- include/pacemaker.h | 53 +- include/pcmki/Makefile.am | 16 +- include/pcmki/pcmki_agents.h | 19 + include/pcmki/pcmki_cluster_queries.h | 7 +- include/pcmki/pcmki_resource.h | 8 +- include/pcmki/pcmki_sched_allocate.h | 50 - include/pcmki/pcmki_sched_utils.h | 33 - include/pcmki/pcmki_scheduler.h | 31 +- 
include/pcmki/pcmki_simulate.h | 26 +- include/pcmki/pcmki_status.h | 8 +- include/pcmki/pcmki_transition.h | 2 + include/portability.h | 41 +- lib/Makefile.am | 18 +- lib/cib/Makefile.am | 22 +- lib/cib/cib_attrs.c | 19 +- lib/cib/cib_client.c | 112 +- lib/cib/cib_file.c | 477 ++- lib/cib/cib_native.c | 56 +- lib/cib/cib_ops.c | 228 +- lib/cib/cib_remote.c | 38 +- lib/cib/cib_utils.c | 511 +++- lib/cluster/Makefile.am | 19 +- lib/cluster/cluster.c | 33 +- lib/cluster/cpg.c | 18 +- lib/cluster/crmcluster_private.h | 6 +- lib/cluster/membership.c | 85 +- lib/common/Makefile.am | 33 +- lib/common/acl.c | 41 +- lib/common/actions.c | 532 ++++ lib/common/alerts.c | 87 +- lib/common/cib.c | 23 +- lib/common/crmcommon_private.h | 63 +- lib/common/digest.c | 4 +- lib/common/io.c | 8 +- lib/common/ipc_attrd.c | 37 +- lib/common/ipc_client.c | 461 +-- lib/common/ipc_common.c | 2 +- lib/common/ipc_controld.c | 61 +- lib/common/ipc_pacemakerd.c | 4 +- lib/common/ipc_schedulerd.c | 4 +- lib/common/ipc_server.c | 48 +- lib/common/iso8601.c | 3 +- lib/common/logging.c | 151 +- lib/common/mainloop.c | 42 +- lib/common/mock.c | 26 +- lib/common/mock_private.h | 6 +- lib/common/nvpair.c | 92 +- lib/common/operations.c | 530 ---- lib/common/options.c | 19 +- lib/common/output_html.c | 4 +- lib/common/output_log.c | 130 +- lib/common/output_xml.c | 20 +- lib/common/patchset.c | 121 +- lib/common/patchset_display.c | 26 +- lib/common/remote.c | 39 +- lib/common/results.c | 133 +- lib/common/scheduler.c | 14 + lib/common/schemas.c | 149 +- lib/common/strings.c | 16 +- lib/common/tests/Makefile.am | 4 +- lib/common/tests/acl/Makefile.am | 11 +- lib/common/tests/actions/Makefile.am | 22 + lib/common/tests/actions/copy_in_properties_test.c | 62 + lib/common/tests/actions/expand_plus_plus_test.c | 256 ++ .../tests/actions/fix_plus_plus_recursive_test.c | 47 + lib/common/tests/actions/parse_op_key_test.c | 275 ++ lib/common/tests/actions/pcmk_is_probe_test.c | 25 + lib/common/tests/actions/pcmk_xe_is_probe_test.c | 43 + .../actions/pcmk_xe_mask_probe_failure_test.c | 150 + lib/common/tests/agents/Makefile.am | 12 +- .../tests/agents/crm_parse_agent_spec_test.c | 18 +- lib/common/tests/cmdline/Makefile.am | 5 +- .../tests/cmdline/pcmk__cmdline_preproc_test.c | 13 +- .../tests/cmdline/pcmk__new_common_args_test.c | 62 + lib/common/tests/flags/Makefile.am | 11 +- lib/common/tests/io/Makefile.am | 7 +- lib/common/tests/lists/Makefile.am | 9 +- lib/common/tests/nvpair/Makefile.am | 8 +- lib/common/tests/operations/Makefile.am | 22 - .../tests/operations/copy_in_properties_test.c | 62 - .../tests/operations/expand_plus_plus_test.c | 256 -- .../operations/fix_plus_plus_recursive_test.c | 47 - lib/common/tests/operations/parse_op_key_test.c | 275 -- lib/common/tests/operations/pcmk_is_probe_test.c | 25 - .../tests/operations/pcmk_xe_is_probe_test.c | 43 - .../operations/pcmk_xe_mask_probe_failure_test.c | 150 - lib/common/tests/options/Makefile.am | 9 +- .../tests/options/pcmk__set_env_option_test.c | 57 +- lib/common/tests/output/Makefile.am | 20 +- lib/common/tests/output/pcmk__output_new_test.c | 8 +- lib/common/tests/results/Makefile.am | 4 +- lib/common/tests/results/pcmk__results_test.c | 8 +- lib/common/tests/scores/Makefile.am | 9 +- lib/common/tests/scores/pcmk__add_scores_test.c | 4 +- lib/common/tests/strings/Makefile.am | 54 +- lib/common/tests/strings/pcmk__compress_test.c | 2 +- .../tests/strings/pcmk__guint_from_hash_test.c | 4 + lib/common/tests/strings/pcmk__scan_ll_test.c | 64 + 
lib/common/tests/utils/Makefile.am | 22 +- lib/common/tests/utils/pcmk__fail_attr_name_test.c | 36 + lib/common/tests/utils/pcmk__failcount_name_test.c | 35 + .../tests/utils/pcmk__lastfailure_name_test.c | 35 + lib/common/tests/xml/Makefile.am | 6 +- lib/common/tests/xml/pcmk__xe_foreach_child_test.c | 13 +- lib/common/tests/xpath/Makefile.am | 4 +- lib/common/watchdog.c | 13 +- lib/common/xml.c | 527 ++-- lib/common/xml_attr.c | 84 + lib/common/xml_display.c | 18 +- lib/common/xpath.c | 13 +- lib/fencing/Makefile.am | 12 +- lib/fencing/st_client.c | 50 +- lib/fencing/st_lha.c | 13 +- lib/fencing/st_rhcs.c | 15 +- lib/lrmd/Makefile.am | 17 +- lib/lrmd/lrmd_alerts.c | 4 +- lib/lrmd/lrmd_client.c | 67 +- lib/pacemaker/Makefile.am | 20 +- lib/pacemaker/libpacemaker_private.h | 712 +++-- lib/pacemaker/pcmk_acl.c | 142 +- lib/pacemaker/pcmk_agents.c | 243 ++ lib/pacemaker/pcmk_cluster_queries.c | 23 +- lib/pacemaker/pcmk_fence.c | 59 +- lib/pacemaker/pcmk_graph_consumer.c | 52 +- lib/pacemaker/pcmk_graph_logging.c | 15 +- lib/pacemaker/pcmk_graph_producer.c | 420 +-- lib/pacemaker/pcmk_injections.c | 60 +- lib/pacemaker/pcmk_output.c | 512 ++-- lib/pacemaker/pcmk_resource.c | 30 +- lib/pacemaker/pcmk_rule.c | 67 +- lib/pacemaker/pcmk_sched_actions.c | 860 +++--- lib/pacemaker/pcmk_sched_bundle.c | 1422 +++++---- lib/pacemaker/pcmk_sched_clone.c | 684 +++-- lib/pacemaker/pcmk_sched_colocation.c | 1266 +++++--- lib/pacemaker/pcmk_sched_constraints.c | 199 +- lib/pacemaker/pcmk_sched_fencing.c | 181 +- lib/pacemaker/pcmk_sched_group.c | 581 ++-- lib/pacemaker/pcmk_sched_instances.c | 738 ++--- lib/pacemaker/pcmk_sched_location.c | 216 +- lib/pacemaker/pcmk_sched_migration.c | 220 +- lib/pacemaker/pcmk_sched_nodes.c | 221 +- lib/pacemaker/pcmk_sched_ordering.c | 459 +-- lib/pacemaker/pcmk_sched_primitive.c | 641 ++-- lib/pacemaker/pcmk_sched_probes.c | 350 +-- lib/pacemaker/pcmk_sched_promotable.c | 443 +-- lib/pacemaker/pcmk_sched_recurring.c | 240 +- lib/pacemaker/pcmk_sched_remote.c | 252 +- lib/pacemaker/pcmk_sched_resource.c | 517 ++-- lib/pacemaker/pcmk_sched_tickets.c | 142 +- lib/pacemaker/pcmk_sched_utilization.c | 102 +- lib/pacemaker/pcmk_scheduler.c | 421 +-- lib/pacemaker/pcmk_simulate.c | 269 +- lib/pacemaker/pcmk_status.c | 73 +- lib/pengine/Makefile.am | 52 +- lib/pengine/bundle.c | 415 ++- lib/pengine/clone.c | 428 +-- lib/pengine/common.c | 339 ++- lib/pengine/complex.c | 338 ++- lib/pengine/failcounts.c | 247 +- lib/pengine/group.c | 102 +- lib/pengine/native.c | 335 ++- lib/pengine/pe_actions.c | 1303 ++++---- lib/pengine/pe_digest.c | 162 +- lib/pengine/pe_health.c | 16 +- lib/pengine/pe_notif.c | 226 +- lib/pengine/pe_output.c | 552 ++-- lib/pengine/pe_status_private.h | 83 +- lib/pengine/remote.c | 100 +- lib/pengine/rules.c | 47 +- lib/pengine/rules_alerts.c | 13 +- lib/pengine/status.c | 268 +- lib/pengine/tags.c | 37 +- lib/pengine/tests/Makefile.am | 15 +- lib/pengine/tests/native/Makefile.am | 4 +- lib/pengine/tests/native/native_find_rsc_test.c | 724 +++-- lib/pengine/tests/native/pe_base_name_eq_test.c | 31 +- lib/pengine/tests/status/Makefile.am | 12 +- lib/pengine/tests/status/pe_find_node_any_test.c | 6 +- lib/pengine/tests/status/pe_find_node_id_test.c | 6 +- lib/pengine/tests/status/pe_find_node_test.c | 6 +- lib/pengine/tests/status/pe_new_working_set_test.c | 10 +- .../tests/status/set_working_set_defaults_test.c | 27 +- lib/pengine/tests/utils/Makefile.am | 5 +- lib/pengine/tests/utils/pe__cmp_node_name_test.c | 6 +- .../tests/utils/pe__cmp_rsc_priority_test.c 
| 4 +- lib/pengine/unpack.c | 1794 ++++++----- lib/pengine/utils.c | 331 +- lib/pengine/variant.h | 91 - lib/services/Makefile.am | 13 +- lib/services/dbus.c | 2 + lib/services/services.c | 8 +- lib/services/services_linux.c | 22 +- lib/services/services_lsb.c | 5 +- lib/services/services_nagios.c | 4 +- lib/services/systemd.c | 26 +- lib/services/upstart.c | 21 +- m4/REQUIRE_PROG.m4 | 18 + m4/version.m4 | 2 +- maint/Makefile.am | 44 +- maint/bumplibs.in | 57 +- mk/common.mk | 8 +- mk/release.mk | 21 +- mk/tap.mk | 13 +- po/zh_CN.po | 397 +-- python/Makefile.am | 10 +- python/pacemaker/Makefile.am | 4 +- python/pacemaker/_cts/CTS.py | 31 +- python/pacemaker/_cts/Makefile.am | 14 +- python/pacemaker/_cts/audits.py | 1029 +++++++ python/pacemaker/_cts/cib.py | 425 +++ python/pacemaker/_cts/cibxml.py | 734 +++++ python/pacemaker/_cts/clustermanager.py | 916 ++++++ python/pacemaker/_cts/cmcorosync.py | 80 + python/pacemaker/_cts/environment.py | 35 +- python/pacemaker/_cts/input.py | 18 + python/pacemaker/_cts/logging.py | 2 +- python/pacemaker/_cts/network.py | 59 + python/pacemaker/_cts/patterns.py | 14 +- python/pacemaker/_cts/process.py | 2 +- python/pacemaker/_cts/remote.py | 8 +- python/pacemaker/_cts/scenarios.py | 422 +++ python/pacemaker/_cts/test.py | 35 +- python/pacemaker/_cts/tests/Makefile.am | 14 + python/pacemaker/_cts/tests/__init__.py | 87 + python/pacemaker/_cts/tests/componentfail.py | 167 + python/pacemaker/_cts/tests/ctstest.py | 252 ++ python/pacemaker/_cts/tests/fliptest.py | 61 + python/pacemaker/_cts/tests/maintenancemode.py | 238 ++ python/pacemaker/_cts/tests/nearquorumpointtest.py | 125 + python/pacemaker/_cts/tests/partialstart.py | 75 + python/pacemaker/_cts/tests/reattach.py | 221 ++ python/pacemaker/_cts/tests/remotebasic.py | 39 + python/pacemaker/_cts/tests/remotedriver.py | 556 ++++ python/pacemaker/_cts/tests/remotemigrate.py | 63 + python/pacemaker/_cts/tests/remoterscfailure.py | 73 + python/pacemaker/_cts/tests/remotestonithd.py | 62 + python/pacemaker/_cts/tests/resourcerecover.py | 175 ++ python/pacemaker/_cts/tests/restartonebyone.py | 58 + python/pacemaker/_cts/tests/restarttest.py | 49 + python/pacemaker/_cts/tests/resynccib.py | 75 + python/pacemaker/_cts/tests/simulstart.py | 42 + python/pacemaker/_cts/tests/simulstartlite.py | 133 + python/pacemaker/_cts/tests/simulstop.py | 42 + python/pacemaker/_cts/tests/simulstoplite.py | 91 + python/pacemaker/_cts/tests/splitbraintest.py | 215 ++ python/pacemaker/_cts/tests/standbytest.py | 110 + python/pacemaker/_cts/tests/startonebyone.py | 55 + python/pacemaker/_cts/tests/starttest.py | 54 + python/pacemaker/_cts/tests/stonithdtest.py | 145 + python/pacemaker/_cts/tests/stoponebyone.py | 56 + python/pacemaker/_cts/tests/stoptest.py | 99 + python/pacemaker/_cts/timer.py | 63 + python/pacemaker/_cts/watcher.py | 10 +- python/pacemaker/buildoptions.py.in | 3 + python/pylintrc | 3 +- python/setup.py.in | 2 +- python/tests/Makefile.am | 3 +- python/tests/__init__.py | 0 python/tests/test_cts_network.py | 37 + replace/Makefile.am | 28 - replace/NoSuchFunctionName.c | 31 - replace/alphasort.c | 55 - replace/scandir.c | 233 -- replace/strchrnul.c | 15 - replace/strerror.c | 37 - replace/strndup.c | 38 - replace/strnlen.c | 31 - rpm/Makefile.am | 62 +- rpm/pacemaker.spec.in | 19 +- tests/Makefile.am | 9 +- tools/Makefile.am | 171 +- tools/attrd_updater.c | 16 +- tools/cibadmin.c | 12 +- tools/cluster-helper.in | 2 +- tools/cluster-init.in | 537 ---- tools/crm_attribute.c | 4 +- tools/crm_diff.c | 43 +- 
tools/crm_mon.c | 6 +- tools/crm_mon.h | 2 +- tools/crm_mon_curses.c | 8 +- tools/crm_node.c | 751 +++-- tools/crm_resource.c | 339 +-- tools/crm_resource.h | 61 +- tools/crm_resource_ban.c | 75 +- tools/crm_resource_print.c | 96 +- tools/crm_resource_runtime.c | 540 ++-- tools/crm_shadow.c | 49 +- tools/crm_simulate.c | 31 +- tools/crm_ticket.c | 74 +- tools/crm_verify.c | 53 +- tools/stonith_admin.c | 18 +- xml/Makefile.am | 85 +- xml/README.md | 1 - xml/api/crm_node-2.32.rng | 53 + xml/version-diff.sh.in | 60 + 873 files changed, 63952 insertions(+), 37502 deletions(-) create mode 100644 cts/cli/crm_verify_invalid_bz.xml create mode 100644 cts/cli/crm_verify_invalid_no_stonith.xml create mode 100755 cts/cluster_test.in create mode 100644 cts/cts-lab.in create mode 100644 cts/cts-log-watcher.in create mode 100755 cts/cts.in delete mode 100644 cts/lab/CIB.py delete mode 100644 cts/lab/CM_corosync.py delete mode 100755 cts/lab/CTSaudits.py delete mode 100644 cts/lab/CTSlab.py.in delete mode 100644 cts/lab/CTSscenarios.py delete mode 100644 cts/lab/CTStests.py delete mode 100644 cts/lab/ClusterManager.py delete mode 100644 cts/lab/Makefile.am delete mode 100644 cts/lab/OCFIPraTest.py.in delete mode 100644 cts/lab/__init__.py delete mode 100644 cts/lab/cib_xml.py delete mode 100755 cts/lab/cluster_test.in delete mode 100644 cts/lab/cts-log-watcher.in delete mode 100755 cts/lab/cts.in delete mode 100644 cts/lxc_autogen.sh.in create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-1.dot create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-2.dot create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-3.dot create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-4.dot create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-5.dot create mode 100644 cts/scheduler/dot/bundle-promoted-anticolocation-6.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-1.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-2.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-3.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-4.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-5.dot create mode 100644 cts/scheduler/dot/bundle-promoted-colocation-6.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-1.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-2.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-3.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-4.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-5.dot create mode 100644 cts/scheduler/dot/bundle-promoted-location-6.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-1.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-10.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-11.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-12.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-2.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-3.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-4.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-5.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-6.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-7.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-8.dot create mode 100644 cts/scheduler/dot/clone-recover-no-shuffle-9.dot create mode 
100644 cts/scheduler/dot/coloc-with-inner-group-member.dot create mode 100644 cts/scheduler/dot/group-anticolocation-2.dot create mode 100644 cts/scheduler/dot/group-anticolocation-3.dot create mode 100644 cts/scheduler/dot/group-anticolocation-4.dot create mode 100644 cts/scheduler/dot/group-anticolocation-5.dot create mode 100644 cts/scheduler/dot/node-pending-timeout.dot create mode 100644 cts/scheduler/dot/pending-node-no-uname.dot create mode 100644 cts/scheduler/dot/timeout-by-node.dot create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-1.exp create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-2.exp create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-3.exp create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-4.exp create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-5.exp create mode 100644 cts/scheduler/exp/bundle-promoted-anticolocation-6.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-1.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-2.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-3.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-4.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-5.exp create mode 100644 cts/scheduler/exp/bundle-promoted-colocation-6.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-1.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-2.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-3.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-4.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-5.exp create mode 100644 cts/scheduler/exp/bundle-promoted-location-6.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-1.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-10.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-11.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-12.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-2.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-3.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-4.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-5.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-6.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-7.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-8.exp create mode 100644 cts/scheduler/exp/clone-recover-no-shuffle-9.exp create mode 100644 cts/scheduler/exp/coloc-with-inner-group-member.exp create mode 100644 cts/scheduler/exp/group-anticolocation-2.exp create mode 100644 cts/scheduler/exp/group-anticolocation-3.exp create mode 100644 cts/scheduler/exp/group-anticolocation-4.exp create mode 100644 cts/scheduler/exp/group-anticolocation-5.exp create mode 100644 cts/scheduler/exp/node-pending-timeout.exp create mode 100644 cts/scheduler/exp/pending-node-no-uname.exp create mode 100644 cts/scheduler/exp/timeout-by-node.exp create mode 100644 cts/scheduler/scores/bundle-promoted-anticolocation-1.scores create mode 100644 cts/scheduler/scores/bundle-promoted-anticolocation-2.scores create mode 100644 cts/scheduler/scores/bundle-promoted-anticolocation-3.scores create mode 100644 cts/scheduler/scores/bundle-promoted-anticolocation-4.scores create mode 100644 cts/scheduler/scores/bundle-promoted-anticolocation-5.scores create mode 100644 
cts/scheduler/scores/bundle-promoted-anticolocation-6.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-1.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-2.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-3.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-4.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-5.scores create mode 100644 cts/scheduler/scores/bundle-promoted-colocation-6.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-1.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-2.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-3.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-4.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-5.scores create mode 100644 cts/scheduler/scores/bundle-promoted-location-6.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-1.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-10.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-11.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-12.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-2.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-3.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-4.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-5.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-6.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-7.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-8.scores create mode 100644 cts/scheduler/scores/clone-recover-no-shuffle-9.scores create mode 100644 cts/scheduler/scores/coloc-with-inner-group-member.scores create mode 100644 cts/scheduler/scores/group-anticolocation-2.scores create mode 100644 cts/scheduler/scores/group-anticolocation-3.scores create mode 100644 cts/scheduler/scores/group-anticolocation-4.scores create mode 100644 cts/scheduler/scores/group-anticolocation-5.scores create mode 100644 cts/scheduler/scores/node-pending-timeout.scores create mode 100644 cts/scheduler/scores/pending-node-no-uname.scores create mode 100644 cts/scheduler/scores/timeout-by-node.scores create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-1.summary create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-2.summary create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-3.summary create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-4.summary create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-5.summary create mode 100644 cts/scheduler/summary/bundle-promoted-anticolocation-6.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-1.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-2.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-3.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-4.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-5.summary create mode 100644 cts/scheduler/summary/bundle-promoted-colocation-6.summary create mode 100644 cts/scheduler/summary/bundle-promoted-location-1.summary create mode 100644 cts/scheduler/summary/bundle-promoted-location-2.summary create mode 100644 
cts/scheduler/summary/bundle-promoted-location-3.summary create mode 100644 cts/scheduler/summary/bundle-promoted-location-4.summary create mode 100644 cts/scheduler/summary/bundle-promoted-location-5.summary create mode 100644 cts/scheduler/summary/bundle-promoted-location-6.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-1.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-10.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-11.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-12.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-2.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-3.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-4.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-5.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-6.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-7.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-8.summary create mode 100644 cts/scheduler/summary/clone-recover-no-shuffle-9.summary create mode 100644 cts/scheduler/summary/coloc-with-inner-group-member.summary create mode 100644 cts/scheduler/summary/group-anticolocation-2.summary create mode 100644 cts/scheduler/summary/group-anticolocation-3.summary create mode 100644 cts/scheduler/summary/group-anticolocation-4.summary create mode 100644 cts/scheduler/summary/group-anticolocation-5.summary create mode 100644 cts/scheduler/summary/node-pending-timeout.summary create mode 100644 cts/scheduler/summary/pending-node-no-uname.summary create mode 100644 cts/scheduler/summary/timeout-by-node.summary create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-1.xml create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-2.xml create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-3.xml create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-4.xml create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-5.xml create mode 100644 cts/scheduler/xml/bundle-promoted-anticolocation-6.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-1.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-2.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-3.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-4.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-5.xml create mode 100644 cts/scheduler/xml/bundle-promoted-colocation-6.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-1.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-2.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-3.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-4.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-5.xml create mode 100644 cts/scheduler/xml/bundle-promoted-location-6.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-1.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-10.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-11.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-12.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-2.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-3.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-4.xml create mode 100644 
cts/scheduler/xml/clone-recover-no-shuffle-5.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-6.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-7.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-8.xml create mode 100644 cts/scheduler/xml/clone-recover-no-shuffle-9.xml create mode 100644 cts/scheduler/xml/coloc-with-inner-group-member.xml create mode 100644 cts/scheduler/xml/group-anticolocation-2.xml create mode 100644 cts/scheduler/xml/group-anticolocation-3.xml create mode 100644 cts/scheduler/xml/group-anticolocation-4.xml create mode 100644 cts/scheduler/xml/group-anticolocation-5.xml create mode 100644 cts/scheduler/xml/node-pending-timeout.xml create mode 100644 cts/scheduler/xml/pending-node-no-uname.xml create mode 100644 cts/scheduler/xml/timeout-by-node.xml delete mode 100644 daemons/based/based_common.c create mode 100644 daemons/based/based_operation.c create mode 100644 daemons/based/based_transaction.c create mode 100644 daemons/based/based_transaction.h create mode 100644 daemons/fenced/fenced_cib.c create mode 100644 daemons/fenced/fenced_scheduler.c create mode 100644 doc/sphinx/Pacemaker_Administration/administrative.rst create mode 100644 doc/sphinx/Pacemaker_Administration/moving.rst delete mode 100644 doc/sphinx/Pacemaker_Explained/advanced-options.rst delete mode 100644 doc/sphinx/Pacemaker_Explained/advanced-resources.rst create mode 100644 doc/sphinx/Pacemaker_Explained/cluster-options.rst create mode 100644 doc/sphinx/Pacemaker_Explained/collective.rst create mode 100644 doc/sphinx/Pacemaker_Explained/local-options.rst create mode 100644 doc/sphinx/Pacemaker_Explained/operations.rst delete mode 100644 doc/sphinx/Pacemaker_Explained/options.rst create mode 100644 include/crm/common/action_relation_internal.h create mode 100644 include/crm/common/actions.h create mode 100644 include/crm/common/actions_internal.h create mode 100644 include/crm/common/cib_internal.h create mode 100644 include/crm/common/clone_internal.h create mode 100644 include/crm/common/digests_internal.h create mode 100644 include/crm/common/failcounts_internal.h create mode 100644 include/crm/common/group_internal.h create mode 100644 include/crm/common/nodes.h create mode 100644 include/crm/common/resources.h create mode 100644 include/crm/common/roles.h create mode 100644 include/crm/common/roles_internal.h create mode 100644 include/crm/common/scheduler.h create mode 100644 include/crm/common/scheduler_internal.h create mode 100644 include/crm/common/scheduler_types.h create mode 100644 include/crm/common/tags.h create mode 100644 include/crm/common/tickets.h create mode 100644 include/crm/lrmd_events.h create mode 100644 include/pcmki/pcmki_agents.h delete mode 100644 include/pcmki/pcmki_sched_allocate.h delete mode 100644 include/pcmki/pcmki_sched_utils.h create mode 100644 lib/common/actions.c delete mode 100644 lib/common/operations.c create mode 100644 lib/common/scheduler.c create mode 100644 lib/common/tests/actions/Makefile.am create mode 100644 lib/common/tests/actions/copy_in_properties_test.c create mode 100644 lib/common/tests/actions/expand_plus_plus_test.c create mode 100644 lib/common/tests/actions/fix_plus_plus_recursive_test.c create mode 100644 lib/common/tests/actions/parse_op_key_test.c create mode 100644 lib/common/tests/actions/pcmk_is_probe_test.c create mode 100644 lib/common/tests/actions/pcmk_xe_is_probe_test.c create mode 100644 lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c create mode 100644 
lib/common/tests/cmdline/pcmk__new_common_args_test.c delete mode 100644 lib/common/tests/operations/Makefile.am delete mode 100644 lib/common/tests/operations/copy_in_properties_test.c delete mode 100644 lib/common/tests/operations/expand_plus_plus_test.c delete mode 100644 lib/common/tests/operations/fix_plus_plus_recursive_test.c delete mode 100644 lib/common/tests/operations/parse_op_key_test.c delete mode 100644 lib/common/tests/operations/pcmk_is_probe_test.c delete mode 100644 lib/common/tests/operations/pcmk_xe_is_probe_test.c delete mode 100644 lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c create mode 100644 lib/common/tests/strings/pcmk__scan_ll_test.c create mode 100644 lib/common/tests/utils/pcmk__fail_attr_name_test.c create mode 100644 lib/common/tests/utils/pcmk__failcount_name_test.c create mode 100644 lib/common/tests/utils/pcmk__lastfailure_name_test.c create mode 100644 lib/common/xml_attr.c create mode 100644 lib/pacemaker/pcmk_agents.c delete mode 100644 lib/pengine/variant.h create mode 100644 m4/REQUIRE_PROG.m4 create mode 100644 python/pacemaker/_cts/audits.py create mode 100644 python/pacemaker/_cts/cib.py create mode 100644 python/pacemaker/_cts/cibxml.py create mode 100644 python/pacemaker/_cts/clustermanager.py create mode 100644 python/pacemaker/_cts/cmcorosync.py create mode 100644 python/pacemaker/_cts/input.py create mode 100644 python/pacemaker/_cts/network.py create mode 100644 python/pacemaker/_cts/scenarios.py create mode 100644 python/pacemaker/_cts/tests/Makefile.am create mode 100644 python/pacemaker/_cts/tests/__init__.py create mode 100644 python/pacemaker/_cts/tests/componentfail.py create mode 100644 python/pacemaker/_cts/tests/ctstest.py create mode 100644 python/pacemaker/_cts/tests/fliptest.py create mode 100644 python/pacemaker/_cts/tests/maintenancemode.py create mode 100644 python/pacemaker/_cts/tests/nearquorumpointtest.py create mode 100644 python/pacemaker/_cts/tests/partialstart.py create mode 100644 python/pacemaker/_cts/tests/reattach.py create mode 100644 python/pacemaker/_cts/tests/remotebasic.py create mode 100644 python/pacemaker/_cts/tests/remotedriver.py create mode 100644 python/pacemaker/_cts/tests/remotemigrate.py create mode 100644 python/pacemaker/_cts/tests/remoterscfailure.py create mode 100644 python/pacemaker/_cts/tests/remotestonithd.py create mode 100644 python/pacemaker/_cts/tests/resourcerecover.py create mode 100644 python/pacemaker/_cts/tests/restartonebyone.py create mode 100644 python/pacemaker/_cts/tests/restarttest.py create mode 100644 python/pacemaker/_cts/tests/resynccib.py create mode 100644 python/pacemaker/_cts/tests/simulstart.py create mode 100644 python/pacemaker/_cts/tests/simulstartlite.py create mode 100644 python/pacemaker/_cts/tests/simulstop.py create mode 100644 python/pacemaker/_cts/tests/simulstoplite.py create mode 100644 python/pacemaker/_cts/tests/splitbraintest.py create mode 100644 python/pacemaker/_cts/tests/standbytest.py create mode 100644 python/pacemaker/_cts/tests/startonebyone.py create mode 100644 python/pacemaker/_cts/tests/starttest.py create mode 100644 python/pacemaker/_cts/tests/stonithdtest.py create mode 100644 python/pacemaker/_cts/tests/stoponebyone.py create mode 100644 python/pacemaker/_cts/tests/stoptest.py create mode 100644 python/pacemaker/_cts/timer.py create mode 100644 python/tests/__init__.py create mode 100644 python/tests/test_cts_network.py delete mode 100644 replace/Makefile.am delete mode 100644 replace/NoSuchFunctionName.c delete mode 
100644 replace/alphasort.c delete mode 100644 replace/scandir.c delete mode 100644 replace/strchrnul.c delete mode 100644 replace/strerror.c delete mode 100644 replace/strndup.c delete mode 100644 replace/strnlen.c delete mode 100755 tools/cluster-init.in create mode 100644 xml/api/crm_node-2.32.rng create mode 100644 xml/version-diff.sh.in diff --git a/ChangeLog b/ChangeLog index a0a5419..e5ecf98 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,436 @@ +* Tue Dec 19 2023 Ken Gaillot Pacemaker-2.1.7 +- 1388 commits with 358 files changed, 23771 insertions(+), 17219 deletions(-) + +- Features added since Pacemaker-2.1.6 + + build: allow building with libxml2 2.12.0 and greater + + CIB: deprecate "ordering" attribute of "resource_set" + + CIB: new cluster option "node-pending-timeout" (defaulting to 0, meaning + no timeout, to preserve existing behavior) allows fencing of nodes that do + not join Pacemaker's controller group within this much time after joining + the cluster + + controller: PCMK_node_start_state now works with Pacemaker Remote nodes + + tools: crm_verify now supports --quiet option (currently same as default + behavior, but in the future, verbose behavior might become the default, + so script writers are recommended to explicitly add --quiet if they do not + want output) + + tools: crm_node supports standard --output-as/--output-to arguments + + tests: CTSlab.py was renamed to cts-lab + +- Fixes since Pacemaker-2.1.6 + + logging: restore ability to enable XML trace logs by file and function + (regression introduced in 2.1.6) + + scheduler: avoid double free with disabled recurring actions + (regression introduced in 2.1.5) + + tools: consider dampening argument when setting values with attrd_updater + (regression introduced in 2.1.5) + + tools: wait for reply from crm_node -R (regression introduced in 2.0.5) + + agents: handle dampening parameter consistently and correctly + + CIB: be more strict about ignoring colocation elements without an ID + + controller: do not check whether watchdog fencing is enabled + if "stonith-watchdog-timeout" is not configured + + controller: don't try to execute agent action at shutdown + + controller: avoid race condition when updating node state during join + + controller: correctly determine state of a fenced node without a name + + controller: wait a second between fencer connection attempts + + libpacemaker: avoid shuffling clone instances unnecessarily + + libpacemaker: get bundle container's promotion score from correct node + + libpacemaker: role-based colocations now work with bundles + + libpacemaker: clone-node-max now works with cloned groups + + scheduler: compare anti-colocation dependent negative preferences against + stickiness + + scheduler: consider explicit colocations with group members + + scheduler: avoid fencing a pending node without a name + + scheduler: properly evaluate rules in action meta-attributes + + scheduler: properly sort rule-based blocks when overwriting values + + tools: crm_resource --wait will now wait if any actions are pending + (previously it would wait only if new actions were planned) + + tools: crm_verify --output-as=xml now includes detailed messages + + tools: avoid showing pending nodes as having "<3.15.1" feature set in + crm_mon + + tools: fix display of clone descriptions + + tools: crm_resource now reports an error rather than timing out when + trying to restart an unmanaged resource + + tools: crm_resource now properly detects which promoted role name to use + in ban and move constraints + 
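(Illustrative aside, not part of the upstream patch: a minimal sketch of how the tool changes listed above might be exercised. The option names are taken from the changelog entries and from the updated agents later in this patch; the exact invocations below are assumptions for illustration only.

    # validate the live CIB quietly, using the new crm_verify --quiet option
    crm_verify --live-check --quiet

    # list cluster nodes using the new standard output arguments of crm_node
    crm_node --list --output-as=xml

    # update a node health attribute with dampening, as the reworked agents below now do
    attrd_updater -n "#health-cpu" -B green -d 30s

    # set the new node-pending-timeout cluster option (assumed crm_attribute syntax)
    crm_attribute --type crm_config --name node-pending-timeout --update 180
)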
+- Public API changes since Pacemaker-2.1.6 (all API/ABI backward-compatible) + + libcib: cib_t now supports transactions via new cib_api_operations_t + methods, new cib_transaction value in enum cib_call_options, and new + cib_t transaction and user members + + libcib: cib_t now supports setting the ACL user for methods via new + cib_api_operations_t set_user() method + + libcib: deprecate cib_api_operations_t methods inputfd(), noop(), quit(), + set_op_callback(), and signon_raw() + + libcib: deprecate cib_call_options values cib_mixed_update, + cib_scope_local, and cib_zero_copy + + libcib: deprecate cib_t op_callback member + + libcrmcluster: deprecate set_uuid() + + libcrmcluster: send_cluster_message()'s data argument is const + + libcrmcommon: add enum pcmk_rc_e values pcmk_rc_compression, + pcmk_rc_ns_resolution, and pcmk_rc_no_transaction + + libcrmcommon,libpe_rules,libpe_status: many APIs have been moved from + libpe_rules and libpe_status to libcrmcommon, sometimes with new names + (deprecating the old ones), as described below + + libcrmcommon: add (and deprecate) PCMK_DEFAULT_METADATA_TIMEOUT_MS defined + constant + + libcrmcommon: add enum pcmk_rsc_flags + + libcrmcommon: add enum pcmk_scheduler_flags + + libcrmcommon: add pcmk_action_added_to_graph + + libcrmcommon: add pcmk_action_always_in_graph + + libcrmcommon: add pcmk_action_attrs_evaluated + + libcrmcommon: add PCMK_ACTION_CANCEL string constant + + libcrmcommon: add PCMK_ACTION_CLEAR_FAILCOUNT string constant + + libcrmcommon: add PCMK_ACTION_CLONE_ONE_OR_MORE string constant + + libcrmcommon: add PCMK_ACTION_DELETE string constant + + libcrmcommon: add PCMK_ACTION_DEMOTE string constant + + libcrmcommon: add pcmk_action_demote to enum action_tasks + + libcrmcommon: add PCMK_ACTION_DEMOTED string constant + + libcrmcommon: add pcmk_action_demoted to enum action_tasks + + libcrmcommon: add pcmk_action_detect_loop + + libcrmcommon: add PCMK_ACTION_DO_SHUTDOWN string constant + + libcrmcommon: add pcmk_action_fence to enum action_tasks + + libcrmcommon: add pcmk_action_inputs_deduplicated + + libcrmcommon: add PCMK_ACTION_LIST string constant + + libcrmcommon: add PCMK_ACTION_LOAD_STOPPED string constant + + libcrmcommon: add PCMK_ACTION_LRM_DELETE string constant + + libcrmcommon: add PCMK_ACTION_MAINTENANCE_NODES string constant + + libcrmcommon: add PCMK_ACTION_META_DATA string constant + + libcrmcommon: add pcmk_action_migratable + + libcrmcommon: add PCMK_ACTION_MIGRATE_FROM string constant + + libcrmcommon: add PCMK_ACTION_MIGRATE_TO string constant + + libcrmcommon: add pcmk_action_migration_abort + + libcrmcommon: add pcmk_action_min_runnable + + libcrmcommon: add PCMK_ACTION_MONITOR string constant + + libcrmcommon: add pcmk_action_monitor to enum action_tasks + + libcrmcommon: add PCMK_ACTION_NOTIFIED string constant + + libcrmcommon: add pcmk_action_notified to enum action_tasks + + libcrmcommon: add PCMK_ACTION_NOTIFY string constant + + libcrmcommon: add pcmk_action_notify to enum action_tasks + + libcrmcommon: add PCMK_ACTION_OFF string constant + + libcrmcommon: add PCMK_ACTION_ON string constant + + libcrmcommon: add PCMK_ACTION_ONE_OR_MORE string constant + + libcrmcommon: add pcmk_action_on_dc + + libcrmcommon: add pcmk_action_optional + + libcrmcommon: add PCMK_ACTION_PROMOTE string constant + + libcrmcommon: add pcmk_action_promote to enum action_tasks + + libcrmcommon: add PCMK_ACTION_PROMOTED string constant + + libcrmcommon: add pcmk_action_promoted to enum action_tasks + + libcrmcommon: add 
pcmk_action_pseudo + + libcrmcommon: add PCMK_ACTION_REBOOT string constant + + libcrmcommon: add PCMK_ACTION_RELOAD string constant + + libcrmcommon: add PCMK_ACTION_RELOAD_AGENT string constant + + libcrmcommon: add pcmk_action_reschedule + + libcrmcommon: add pcmk_action_runnable + + libcrmcommon: add PCMK_ACTION_RUNNING string constant + + libcrmcommon: add pcmk_action_shutdown to enum action_tasks + + libcrmcommon: add PCMK_ACTION_START string constant + + libcrmcommon: add pcmk_action_start to enum action_tasks + + libcrmcommon: add pcmk_action_started to enum action_tasks + + libcrmcommon: add PCMK_ACTION_STATUS string constant + + libcrmcommon: add PCMK_ACTION_STONITH string constant + + libcrmcommon: add PCMK_ACTION_STOP string constant + + libcrmcommon: add pcmk_action_stop to enum action_tasks + + libcrmcommon: add PCMK_ACTION_STOPPED string constant + + libcrmcommon: add pcmk_action_stopped to enum action_tasks + + libcrmcommon: add pcmk_action_t type + + libcrmcommon: add pcmk_action_unspecified to enum action_tasks + + libcrmcommon: add PCMK_ACTION_VALIDATE_ALL string constant + + libcrmcommon: add pcmk_assignment_methods_t type + + libcrmcommon: add PCMK_DEFAULT_ACTION_TIMEOUT_MS defined constant + + libcrmcommon: add pcmk_log_xml_as() + + libcrmcommon: add PCMK_META_CLONE_MAX string constant + + libcrmcommon: add PCMK_META_CLONE_MIN string constant + + libcrmcommon: add PCMK_META_CLONE_NODE_MAX string constant + + libcrmcommon: add PCMK_META_FAILURE_TIMEOUT string constant + + libcrmcommon: add PCMK_META_MIGRATION_THRESHOLD string constant + + libcrmcommon: add PCMK_META_PROMOTED_MAX string constant + + libcrmcommon: add PCMK_META_PROMOTED_NODE_MAX string constant + + libcrmcommon: add pcmk_multiply_active_block to enum rsc_recovery_type + + libcrmcommon: add pcmk_multiply_active_restart to enum rsc_recovery_type + + libcrmcommon: add pcmk_multiply_active_stop to enum rsc_recovery_type + + libcrmcommon: add pcmk_multiply_active_unexpected to enum rsc_recovery_type + + libcrmcommon: add PCMK_NODE_ATTR_TERMINATE string constant + + libcrmcommon: add pcmk_node_t type + + libcrmcommon: add pcmk_node_variant_cluster + + libcrmcommon: add pcmk_node_variant_remote + + libcrmcommon: add pcmk_no_action_flags + + libcrmcommon: add pcmk_no_quorum_demote + + libcrmcommon: add pcmk_no_quorum_fence + + libcrmcommon: add pcmk_no_quorum_freeze + + libcrmcommon: add pcmk_no_quorum_ignore + + libcrmcommon: add pcmk_no_quorum_stop + + libcrmcommon: add pcmk_on_fail_ban to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_block to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_demote to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_fence_node to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_ignore to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_reset_remote to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_restart to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_restart_container to enum action_fail_response + + libcrmcommon: add pcmk_on_fail_standby_node to action_fail_response + + libcrmcommon: add pcmk_on_fail_stop to enum action_fail_response + + libcrmcommon: add pcmk_probe_always to enum pe_discover_e + + libcrmcommon: add pcmk_probe_exclusive to enum pe_discover_e + + libcrmcommon: add pcmk_probe_never to enum pe_discover_e + + libcrmcommon: add pcmk_requires_fencing to enum rsc_start_requirement + + libcrmcommon: add pcmk_requires_nothing to enum rsc_start_requirement + + libcrmcommon: 
add pcmk_requires_quorum to enum rsc_start_requirement + + libcrmcommon: add pcmk_resource_t type + + libcrmcommon: add pcmk_role_promoted to enum rsc_role_e + + libcrmcommon: add pcmk_role_started to enum rsc_role_e + + libcrmcommon: add pcmk_role_stopped to enum rsc_role_e + + libcrmcommon: add pcmk_role_unknown to enum rsc_role_e + + libcrmcommon: add pcmk_role_unpromoted to enum rsc_role_e + + libcrmcommon: add pcmk_rsc_match_anon_basename + + libcrmcommon: add pcmk_rsc_match_basename + + libcrmcommon: add pcmk_rsc_match_clone_only + + libcrmcommon: add pcmk_rsc_match_current_node + + libcrmcommon: add pcmk_rsc_match_history + + libcrmcommon: add pcmk_rsc_methods_t type + + libcrmcommon: add pcmk_rsc_variant_bundle + + libcrmcommon: add pcmk_rsc_variant_clone + + libcrmcommon: add pcmk_rsc_variant_group + + libcrmcommon: add pcmk_rsc_variant_primitive + + libcrmcommon: add pcmk_rsc_variant_unknown + + libcrmcommon: add pcmk_scheduler_t type + + libcrmcommon: add pcmk_tag_t type + + libcrmcommon: add pcmk_ticket_t type + + libcrmcommon: add PCMK_XA_FORMAT string constant + + libcrmcommon: crm_ipc_send()'s message argument is now const + + libcrmcommon: deprecate action_demote in enum action_tasks + + libcrmcommon: deprecate action_demoted in enum action_tasks + + libcrmcommon: deprecate action_fail_block in enum action_fail_response + + libcrmcommon: deprecate action_fail_demote in enum action_fail_response + + libcrmcommon: deprecate action_fail_fence in enum action_fail_response + + libcrmcommon: deprecate action_fail_ignore in enum action_fail_response + + libcrmcommon: deprecate action_fail_migrate in enum action_fail_response + + libcrmcommon: deprecate action_fail_recover in enum action_fail_response + + libcrmcommon: deprecate action_fail_reset_remote in enum action_fail_response + + libcrmcommon: deprecate action_fail_standby in enum action_fail_response + + libcrmcommon: deprecate action_fail_stop in action_fail_response + + libcrmcommon: deprecate action_notified in enum action_tasks + + libcrmcommon: deprecate action_notify in enum action_tasks + + libcrmcommon: deprecate action_promote in enum action_tasks + + libcrmcommon: deprecate action_promoted in enum action_tasks + + libcrmcommon: deprecate action_restart_container in enum action_fail_response + + libcrmcommon: deprecate CRMD_ACTION_CANCEL string constant + + libcrmcommon: deprecate CRMD_ACTION_DELETE string constant + + libcrmcommon: deprecate CRMD_ACTION_DEMOTE string constant + + libcrmcommon: deprecate CRMD_ACTION_DEMOTED string constant + + libcrmcommon: deprecate CRMD_ACTION_METADATA string constant + + libcrmcommon: deprecate CRMD_ACTION_MIGRATE string constant + + libcrmcommon: deprecate CRMD_ACTION_MIGRATED string constant + + libcrmcommon: deprecate CRMD_ACTION_NOTIFIED string constant + + libcrmcommon: deprecate CRMD_ACTION_NOTIFY string constant + + libcrmcommon: deprecate CRMD_ACTION_PROMOTE string constant + + libcrmcommon: deprecate CRMD_ACTION_PROMOTED string constant + + libcrmcommon: deprecate CRMD_ACTION_RELOAD string constant + + libcrmcommon: deprecate CRMD_ACTION_RELOAD_AGENT string constant + + libcrmcommon: deprecate CRMD_ACTION_START string constant + + libcrmcommon: deprecate CRMD_ACTION_STARTED string constant + + libcrmcommon: deprecate CRMD_ACTION_STATUS string constant + + libcrmcommon: deprecate CRMD_ACTION_STOP string constant + + libcrmcommon: deprecate CRMD_ACTION_STOPPED string constant + + libcrmcommon: deprecate CRMD_METADATA_CALL_TIMEOUT defined constant + + libcrmcommon: 
deprecate crm_action_str() + + libcrmcommon: deprecate CRM_DEFAULT_OP_TIMEOUT_S string constant + + libcrmcommon: deprecate crm_element_name() + + libcrmcommon: deprecate CRM_OP_FENCE string constant + + libcrmcommon: deprecate CRM_OP_RELAXED_CLONE string constant + + libcrmcommon: deprecate CRM_OP_RELAXED_SET string constant + + libcrmcommon: deprecate crm_xml_replace() + + libcrmcommon: deprecate enum pe_link_state + + libcrmcommon: deprecate getDocPtr() + + libcrmcommon: deprecate monitor_rsc in enum action_tasks + + libcrmcommon: deprecate node_member + + libcrmcommon: deprecate node_remote + + libcrmcommon: deprecate no_action in enum action_tasks + + libcrmcommon: deprecate no_quorum_demote + + libcrmcommon: deprecate no_quorum_freeze + + libcrmcommon: deprecate no_quorum_ignore + + libcrmcommon: deprecate no_quorum_stop + + libcrmcommon: deprecate no_quorum_suicide + + libcrmcommon: deprecate pcmk_log_xml_impl() + + libcrmcommon: deprecate pcmk_scheduler_t localhost member + + libcrmcommon: deprecate pe_action_dangle + + libcrmcommon: deprecate pe_action_dc + + libcrmcommon: deprecate pe_action_dedup + + libcrmcommon: deprecate pe_action_dumped + + libcrmcommon: deprecate pe_action_have_node_attrs + + libcrmcommon: deprecate pe_action_implied_by_stonith + + libcrmcommon: deprecate pe_action_migrate_runnable + + libcrmcommon: deprecate pe_action_optional + + libcrmcommon: deprecate pe_action_print_always + + libcrmcommon: deprecate pe_action_processed + + libcrmcommon: deprecate pe_action_pseudo + + libcrmcommon: deprecate pe_action_requires_any + + libcrmcommon: deprecate pe_action_reschedule + + libcrmcommon: deprecate pe_action_runnable + + libcrmcommon: deprecate pe_action_tracking + + libcrmcommon: deprecate pe_clone + + libcrmcommon: deprecate pe_container + + libcrmcommon: deprecate pe_discover_always in enum pe_discover_e + + libcrmcommon: deprecate pe_discover_exclusive in enum pe_discover_e + + libcrmcommon: deprecate pe_discover_never in enum pe_discover_e + + libcrmcommon: deprecate pe_find_anon + + libcrmcommon: deprecate pe_find_any + + libcrmcommon: deprecate pe_find_clone + + libcrmcommon: deprecate pe_find_current + + libcrmcommon: deprecate pe_find_inactive + + libcrmcommon: deprecate pe_find_renamed + + libcrmcommon: deprecate pe_group + + libcrmcommon: deprecate pe_native + + libcrmcommon: deprecate pe_unknown + + libcrmcommon: deprecate recovery_block in enum rsc_recovery_type + + libcrmcommon: deprecate recovery_stop_only in enum rsc_recovery_type + + libcrmcommon: deprecate recovery_stop_start in enum rsc_recovery_type + + libcrmcommon: deprecate recovery_stop_unexpected in enum rsc_recovery_type + + libcrmcommon: deprecate RSC_CANCEL string constant + + libcrmcommon: deprecate RSC_DELETE string constant + + libcrmcommon: deprecate RSC_DEMOTE string constant + + libcrmcommon: deprecate RSC_DEMOTED string constant + + libcrmcommon: deprecate RSC_METADATA string constant + + libcrmcommon: deprecate RSC_MIGRATE string constant + + libcrmcommon: deprecate RSC_MIGRATED string constant + + libcrmcommon: deprecate RSC_NOTIFIED string constant + + libcrmcommon: deprecate RSC_NOTIFY string constant + + libcrmcommon: deprecate RSC_PROMOTE string constant + + libcrmcommon: deprecate RSC_PROMOTED string constant + + libcrmcommon: deprecate rsc_req_nothing in enum rsc_start_requirement + + libcrmcommon: deprecate rsc_req_quorum in enum rsc_start_requirement + + libcrmcommon: deprecate rsc_req_stonith in enum rsc_start_requirement + + libcrmcommon: deprecate 
RSC_ROLE_PROMOTED in enum rsc_role_e + + libcrmcommon: deprecate RSC_ROLE_STARTED in enum rsc_role_e + + libcrmcommon: deprecate RSC_ROLE_STOPPED in enum rsc_role_e + + libcrmcommon: deprecate RSC_ROLE_UNKNOWN in enum rsc_role_e + + libcrmcommon: deprecate RSC_ROLE_UNPROMOTED + + libcrmcommon: deprecate RSC_START string constant + + libcrmcommon: deprecate RSC_STARTED string constant + + libcrmcommon: deprecate RSC_STATUS string constant + + libcrmcommon: deprecate RSC_STOP string constant + + libcrmcommon: deprecate RSC_STOPPED string constant + + libcrmcommon: deprecate shutdown_crm in enum action_tasks + + libcrmcommon: deprecate started_rsc in enum action_tasks + + libcrmcommon: deprecate start_rsc in enum action_tasks + + libcrmcommon: deprecate stonith_node in enum action_tasks + + libcrmcommon: deprecate stopped_rsc in enum action_tasks + + libcrmcommon: deprecate stop_rsc in enum action_tasks + + libcrmcommon: deprecate TYPE() macro + + libcrmcommon: deprecate XML_ATTR_VERBOSE string constant + + libcrmcommon: deprecate XML_CIB_ATTR_SOURCE string constant + + libcrmcommon: deprecate XML_CIB_TAG_DOMAINS string constant + + libcrmcommon: deprecate xml_has_children() + + libcrmcommon: deprecate XML_NODE_EXPECTED string constant + + libcrmcommon: deprecate XML_NODE_IN_CLUSTER string constant + + libcrmcommon: deprecate XML_NODE_IS_PEER string constant + + libcrmcommon: deprecate XML_NODE_JOIN_STATE string constant + + libcrmcommon: deprecate XML_RSC_ATTR_FAIL_STICKINESS string constant + + libcrmcommon: deprecate XML_RSC_ATTR_FAIL_TIMEOUT string constant + + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_MAX string constant + + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_MIN string constant + + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_NODEMAX string constant + + libcrmcommon: deprecate XML_RSC_ATTR_PROMOTED_MAX string constant + + libcrmcommon: deprecate XML_RSC_ATTR_PROMOTED_NODEMAX string constant + + libcrmcommon: deprecate XML_TAG_DIFF_ADDED string constant + + libcrmcommon: deprecate XML_TAG_DIFF_REMOVED string constant + + libcrmcommon: deprecate XML_TAG_FRAGMENT + + libcrmcommon: dump_xml_formatted()'s argument is now const + + libcrmcommon: dump_xml_formatted_with_text()'s argument is const + + libcrmcommon: dump_xml_unformatted()'s argument is now const + + libcrmcommon: save_xml_to_file()'s xml argument is now const + + libcrmcommon: validate_xml_verbose()'s xml_blob argument is const + + libcrmcommon: write_xml_fd()'s xml argument is now const + + libcrmcommon: write_xml_file()'s xml argument is now const + + libcrmcommon: xml_top argument of xpath_search() is now const + + libcrmcommon,libpe_rules,libpe_status: move enum pe_ordering, struct + pe_action_wrapper_s, struct pe_tag_s, struct pe_ticket_s, struct + resource_object_functions_s, enum node_type, enum pe_action_flags, enum + pe_discover_e, enum pe_find, enum pe_link_state, enum pe_obj_types, enum + pe_quorum_policy, enum pe_restart, struct pe_action_s, struct pe_node_s, + struct pe_node_shared_s, struct pe_resource_s, struct pe_working_set_s, + enum action_fail_response, enum action_tasks, enum pe_print_options, enum + rsc_recovery_type, enum rsc_role_e, and enum rsc_start_requirement to + libcrmcommon + + libpacemaker,libpe_rules,libpe_status: use pcmk_action_t instead of + pe_action_t, pcmk_node_t instead of pe_node_t, pcmk_resource_t instead of + pe_resource_t, and pcmk_scheduler_t instead of pe_working_set_t in all API + structs and functions + + libpacemaker: add pcmk_list_alternatives(), 
pcmk_list_providers(), + pcmk_list_standards(), and pcmk_list_agents() for functionality equivalent + to crm_resource --list-ocf-alternatives, --list-ocf-providers, + --list-standards, and --list-agents + + libpe_rules,libpe_status: deprecate pe_action_t type + + libpe_rules,libpe_status: deprecate pe_action_wrapper_t + + libpe_rules,libpe_status: deprecate pe_node_t type + + libpe_rules,libpe_status: deprecate pe_resource_t type + + libpe_rules,libpe_status: deprecate pe_tag_t + + libpe_rules,libpe_status: deprecate pe_ticket_t + + libpe_rules,libpe_status: deprecate pe_working_set_t type + + libpe_rules,libpe_status: deprecate resource_alloc_functions_t type + + libpe_rules,libpe_status: deprecate resource_object_functions_t + + libpe_status,libpe_rules: deprecate enum pe_ordering and all its values + + libpe_status,libpe_rules: deprecate RSC_ROLE_MAX + + libpe_status,libpe_rules: deprecate RSC_ROLE_PROMOTED_LEGACY_S string constant + + libpe_status,libpe_rules: deprecate RSC_ROLE_PROMOTED_S string constant + + libpe_status,libpe_rules: deprecate RSC_ROLE_STARTED_S string constant + + libpe_status,libpe_rules: deprecate RSC_ROLE_STOPPED_S string constant + + libpe_status,libpe_rules: deprecate RSC_ROLE_UNKNOWN_S + + libpe_status,libpe_rules: deprecate RSC_ROLE_UNPROMOTED_LEGACY_S string constant + + libpe_status,libpe_rules: deprecate RSC_ROLE_UNPROMOTED_S string constant + + libpe_status: deprecate enum pe_check_parameters + + libpe_status: deprecate pe_flag_check_config + + libpe_status: deprecate pe_flag_concurrent_fencing + + libpe_status: deprecate pe_flag_enable_unfencing + + libpe_status: deprecate pe_flag_have_quorum + + libpe_status: deprecate pe_flag_have_remote_nodes + + libpe_status: deprecate pe_flag_have_status + + libpe_status: deprecate pe_flag_have_stonith_resource + + libpe_status: deprecate pe_flag_maintenance_mode + + libpe_status: deprecate pe_flag_no_compat + + libpe_status: deprecate pe_flag_no_counts + + libpe_status: deprecate pe_flag_quick_location + + libpe_status: deprecate pe_flag_sanitized + + libpe_status: deprecate pe_flag_show_scores + + libpe_status: deprecate pe_flag_show_utilization + + libpe_status: deprecate pe_flag_shutdown_lock + + libpe_status: deprecate pe_flag_startup_fencing + + libpe_status: deprecate pe_flag_startup_probes + + libpe_status: deprecate pe_flag_start_failure_fatal + + libpe_status: deprecate pe_flag_stonith_enabled + + libpe_status: deprecate pe_flag_stop_action_orphans + + libpe_status: deprecate pe_flag_stop_everything + + libpe_status: deprecate pe_flag_stop_rsc_orphans + + libpe_status: deprecate pe_flag_symmetric_cluster + + libpe_status: deprecate pe_rsc_allow_migrate + + libpe_status: deprecate pe_rsc_allow_remote_remotes + + libpe_status: deprecate pe_rsc_assigning + + libpe_status: deprecate pe_rsc_block + + libpe_status: deprecate pe_rsc_critical + + libpe_status: deprecate pe_rsc_detect_loop + + libpe_status: deprecate pe_rsc_failed + + libpe_status: deprecate pe_rsc_failure_ignored + + libpe_status: deprecate pe_rsc_fence_device + + libpe_status: deprecate pe_rsc_is_container + + libpe_status: deprecate pe_rsc_maintenance + + libpe_status: deprecate pe_rsc_managed + + libpe_status: deprecate pe_rsc_merging + + libpe_status: deprecate pe_rsc_needs_fencing + + libpe_status: deprecate pe_rsc_needs_quorum + + libpe_status: deprecate pe_rsc_needs_unfencing + + libpe_status: deprecate pe_rsc_notify + + libpe_status: deprecate pe_rsc_orphan + + libpe_status: deprecate pe_rsc_orphan_container_filler + + libpe_status: 
deprecate pe_rsc_promotable + + libpe_status: deprecate pe_rsc_provisional + + libpe_status: deprecate pe_rsc_reload + + libpe_status: deprecate pe_rsc_replica_container + + libpe_status: deprecate pe_rsc_restarting + + libpe_status: deprecate pe_rsc_runnable + + libpe_status: deprecate pe_rsc_start_pending + + libpe_status: deprecate pe_rsc_stop + + libpe_status: deprecate pe_rsc_stop_unexpected + + libpe_status: deprecate pe_rsc_unique + * Wed May 24 2023 Ken Gaillot Pacemaker-2.1.6 - 1124 commits with 402 files changed, 25220 insertions(+), 14751 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 8cb5d3e..8cac498 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -18,8 +18,6 @@ default: build # directory if a relevant variable hasn't been defined. abs_srcdir ?= $(shell pwd) -GLIB_CFLAGS ?= $(pkg-config --cflags glib-2.0) - PACKAGE ?= pacemaker .PHONY: init @@ -40,8 +38,7 @@ USE_FILE = $(shell test -e rpm/Makefile || echo "-f Makefile.am") $(PACKAGE).spec chroot dirty export mock rc release rpm rpmlint srpm: $(MAKE) $(AM_MAKEFLAGS) -C rpm $(USE_FILE) "$@" -.PHONY: mock-% rpm-% spec-% srpm-% -mock-% rpm-% spec-% srpm-%: +mock-% rpm-% spec-% srpm-%: FORCE $(MAKE) $(AM_MAKEFLAGS) -C rpm $(USE_FILE) "$@" ## Development-related targets @@ -59,3 +56,6 @@ clang $(COVERAGE_TARGETS) $(COVERITY_TARGETS) cppcheck indent: COVLEVEL=$(COVLEVEL) \ CPPCHECK_ARGS=$(CPPCHECK_ARGS) \ -C devel "$@" + +.PHONY: FORCE +FORCE: diff --git a/INSTALL.md b/INSTALL.md index 5bd2975..e03c594 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -11,7 +11,7 @@ | | libuuid-devel | libuuid-devel | uuid-dev | | 0.27 or later | pkgconfig | pkgconfig | pkg-config | | 2.42.0 or later | glib2-devel | glib2-devel | libglib2.0-dev | -| | libxml2-devel | libxml2-devel | libxml2-dev | +| 2.6.0 or later | libxml2-devel | libxml2-devel | libxml2-dev | | | libxslt-devel | libxslt-devel | libxslt-dev | | | bzip2-devel | libbz2-devel | libbz2-dev | | 0.17.0 or later | libqb-devel | libqb-devel | libqb-dev | @@ -55,16 +55,12 @@ Also: | documentation (PDF) | | latexmk texlive texlive-capt-of texlive-collection-xetex texlive-fncychap texlive-framed texlive-multirow texlive-needspace texlive-tabulary texlive-titlesec texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xetex | texlive texlive-latex | texlive texlive-latex-extra | | annotated source code as HTML via "make global" | | global | global | global | | RPM packages via "make rpm" | 4.11 or later | rpm | rpm | (n/a) | -| unit tests | | libcmocka-devel | libcmocka-devel | libcmocka-dev | +| unit tests | 1.1.0 or later | libcmocka-devel | libcmocka-devel | libcmocka-dev | ## Optional testing dependencies * procps and psmisc (if running cts-exec, cts-fencing, or CTS) * valgrind (if running CTS valgrind tests) * python3-systemd (if using CTS on cluster nodes running systemd) -* rsync (if running CTS container tests) -* libvirt-daemon-driver-lxc (if running CTS container tests) -* libvirt-daemon-lxc (if running CTS container tests) -* libvirt-login-shell (if running CTS container tests) * nmap (if not specifying an IP address base) * oprofile (if running CTS profiling tests) * dlm (to log DLM debugging info after CTS tests) diff --git a/Makefile.am b/Makefile.am index fd9db82..c3e39b9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -10,31 +10,31 @@ # This directory must be same as in configure.ac's AC_CONFIG_MACRO_DIR ACLOCAL_AMFLAGS = -I m4 -EXTRA_DIST = CONTRIBUTING.md \ - GNUmakefile \ - INSTALL.md \ - README.markdown \ - autogen.sh \ +EXTRA_DIST = CONTRIBUTING.md \ + GNUmakefile \ 
+ INSTALL.md \ + README.markdown \ + autogen.sh \ m4/CC_CHECK_LDFLAGS.m4 \ m4/CHECK_ENUM_VALUE.m4 \ - m4/gnulib-cache.m4 \ - m4/gnulib-tool.m4 \ - m4/PKG_CHECK_VAR.m4 \ - m4/REQUIRE_HEADER.m4 \ + m4/gnulib-cache.m4 \ + m4/gnulib-tool.m4 \ + m4/PKG_CHECK_VAR.m4 \ + m4/REQUIRE_HEADER.m4 \ m4/version.m4 DISTCLEANFILES = config.status -MAINTAINERCLEANFILES = Makefile.in \ - aclocal.m4 \ - config.guess \ - config.sub \ - configure \ - depcomp \ - install-sh \ - ltmain.sh \ - missing \ - py-compile \ +MAINTAINERCLEANFILES = Makefile.in \ + aclocal.m4 \ + config.guess \ + config.sub \ + configure \ + depcomp \ + install-sh \ + ltmain.sh \ + missing \ + py-compile \ test-driver # Don't try to install files outside build directory for "make distcheck". @@ -45,13 +45,28 @@ AM_DISTCHECK_CONFIGURE_FLAGS = --prefix="$$dc_install_base/usr" \ --with-systemdsystemunitdir="$$dc_install_base$(systemdsystemunitdir)" # Only these will get built with a plain "make" -CORE = replace include lib daemons tools xml po python cts rpm - -SUBDIRS = $(CORE) agents devel doc etc maint tests +CORE = include \ + lib \ + daemons \ + tools \ + xml \ + po \ + python \ + cts \ + rpm + +SUBDIRS = $(CORE) \ + agents \ + devel \ + doc \ + etc \ + maint \ + tests AM_CPPFLAGS = -I$(top_srcdir)/include -doc_DATA = README.markdown COPYING +doc_DATA = README.markdown \ + COPYING licensedir = $(docdir)/licenses/ dist_license_DATA = $(wildcard licenses/*) @@ -67,6 +82,7 @@ DAEMON_R_DIRS = $(CRM_CONFIG_DIR) \ DAEMON_RW_DIRS = $(CRM_BUNDLE_DIR) \ $(CRM_LOG_DIR) +.PHONY: core core: @echo "Building only core components and tests: $(CORE)" @for subdir in $(CORE); do \ @@ -74,6 +90,7 @@ core: $(MAKE) $(AM_MAKEFLAGS) -C $$subdir all || exit 1; \ done +.PHONY: core-clean core-clean: @echo "Cleaning only core components and tests: $(CORE)" @for subdir in $(CORE); do \ @@ -81,6 +98,7 @@ core-clean: $(MAKE) $(AM_MAKEFLAGS) -C $$subdir clean || exit 1; \ done +.PHONY: install-exec-local install-exec-local: for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS); do \ $(INSTALL) -d -m 750 "$(DESTDIR)/$$DIR"; \ @@ -96,18 +114,25 @@ install-exec-local: done # Remove created directories only if they're empty +.PHONY: uninstall-hook uninstall-hook: -for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS) $(DAEMON_RW_DIRS); do \ rmdir "$(DESTDIR)/$$DIR"; \ done +.PHONY: clean-generic clean-generic: -rm -f *.tar.bz2 *.sed PACKAGE ?= pacemaker +.PHONY: clean-local clean-local: -rm -f $(builddir)/$(PACKAGE)-*.tar.gz + -if [ "x$(top_srcdir)" != "x$(top_builddir)" ]; then \ + rm -rf $(top_builddir)/python; \ + fi +.PHONY: distclean-local distclean-local: -rm -rf libltdl autom4te.cache diff --git a/agents/Makefile.am b/agents/Makefile.am index 3cbd7c6..af0d970 100644 --- a/agents/Makefile.am +++ b/agents/Makefile.am @@ -9,4 +9,6 @@ include $(top_srcdir)/mk/common.mk -SUBDIRS = alerts ocf stonith +SUBDIRS = alerts \ + ocf \ + stonith diff --git a/agents/alerts/Makefile.am b/agents/alerts/Makefile.am index fdb294f..a3fe891 100644 --- a/agents/alerts/Makefile.am +++ b/agents/alerts/Makefile.am @@ -10,6 +10,4 @@ include $(top_srcdir)/mk/common.mk samplesdir = $(datadir)/$(PACKAGE)/alerts/ -dist_samples_DATA = alert_file.sh.sample \ - alert_smtp.sh.sample \ - alert_snmp.sh.sample +dist_samples_DATA = $(wildcard *.sample) diff --git a/agents/ocf/HealthCPU.in b/agents/ocf/HealthCPU.in index 4a8e7c3..14e4b07 100755 --- a/agents/ocf/HealthCPU.in +++ b/agents/ocf/HealthCPU.in @@ -67,6 +67,15 @@ the #health-cpu will go red if the %idle of the CPU falls below 10%. 
+ + +The time to wait (dampening) in seconds for further changes before writing + +The time to wait (dampening) in seconds for further changes +before writing + + + @@ -117,16 +126,16 @@ healthcpu_monitor() { if [ $IDLE -lt ${OCF_RESKEY_red_limit} ] ; then # echo "System state RED!" - attrd_updater -n "#health-cpu" -U "red" -d "30s" + attrd_updater -n "#health-cpu" -B "red" -d "${OCF_RESKEY_dampening}" return $OCF_SUCCESS fi if [ $IDLE -lt ${OCF_RESKEY_yellow_limit} ] ; then # echo "System state yellow." - attrd_updater -n "#health-cpu" -U "yellow" -d "30s" + attrd_updater -n "#health-cpu" -B "yellow" -d "${OCF_RESKEY_dampening}" else # echo "System state green." - attrd_updater -n "#health-cpu" -U "green" -d "30s" + attrd_updater -n "#health-cpu" -B "green" -d "${OCF_RESKEY_dampening}" fi return $OCF_SUCCESS @@ -136,8 +145,7 @@ healthcpu_monitor() { } healthcpu_reload_agent() { - # No action required - :; + return $OCF_SUCCESS } healthcpu_validate() { @@ -188,6 +196,9 @@ fi if [ -z "${OCF_RESKEY_yellow_limit}" ] ; then OCF_RESKEY_yellow_limit=50 fi +if [ -z "${OCF_RESKEY_dampening}" ]; then + OCF_RESKEY_dampening="30s" +fi case "$__OCF_ACTION" in meta-data) meta_data @@ -195,7 +206,7 @@ meta-data) meta_data ;; start) healthcpu_start;; stop) healthcpu_stop;; -monitor) healthcpu_monitor;; +monitor) healthcpu_validate && healthcpu_monitor;; reload-agent) healthcpu_reload_agent;; validate-all) healthcpu_validate;; usage|help) healthcpu_usage diff --git a/agents/ocf/HealthSMART.in b/agents/ocf/HealthSMART.in index b6edac2..b2f37de 100755 --- a/agents/ocf/HealthSMART.in +++ b/agents/ocf/HealthSMART.in @@ -139,25 +139,25 @@ check_temperature() { if [ $1 -lt ${lower_red_limit} ] ; then ocf_log info "Drive ${DRIVE} ${DEVICE} too cold: ${1} C" - attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" return 1 fi if [ $1 -gt ${upper_red_limit} ] ; then ocf_log info "Drive ${DRIVE} ${DEVICE} too hot: ${1} C" - attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" return 1 fi if [ $1 -lt ${lower_yellow_limit} ] ; then ocf_log info "Drive ${DRIVE} ${DEVICE} quite cold: ${1} C" - attrd_updater -n "#health-smart" -U "yellow" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" return 1 fi if [ $1 -gt ${upper_yellow_limit} ] ; then ocf_log info "Drive ${DRIVE} ${DEVICE} quite hot: ${1} C" - attrd_updater -n "#health-smart" -U "yellow" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" return 1 fi } @@ -244,7 +244,7 @@ HealthSMART_start() { } HealthSMART_stop() { - attrd_updater -D -n "#health-smart" -d "${OCF_RESKEY_dampen}" + attrd_updater -D -n "#health-smart" rm "${OCF_RESKEY_state}" @@ -278,7 +278,7 @@ HealthSMART_monitor() { # Check overall S.M.A.R.T. status "${OCF_RESKEY_smartctl}" -d "${DEVICE}" -H ${DRIVE} | grep -q "SMART overall-health self-assessment test result: PASSED" if [ $? -ne 0 ]; then - attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" return $OCF_SUCCESS fi @@ -290,7 +290,7 @@ HealthSMART_monitor() { else "${OCF_RESKEY_smartctl}" -H "${DRIVE}" | grep -q "SMART overall-health self-assessment test result: PASSED" if [ $? 
-ne 0 ]; then - attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" return $OCF_SUCCESS fi @@ -301,7 +301,7 @@ HealthSMART_monitor() { fi done - attrd_updater -n "#health-smart" -U "green" -d "${OCF_RESKEY_dampen}" + attrd_updater -n "#health-smart" -B "green" -d "${OCF_RESKEY_dampen}" return $OCF_SUCCESS fi diff --git a/agents/ocf/Makefile.am b/agents/ocf/Makefile.am index 823e67e..0b18bb1 100644 --- a/agents/ocf/Makefile.am +++ b/agents/ocf/Makefile.am @@ -27,8 +27,11 @@ ocf_SCRIPTS = ClusterMon \ if BUILD_XML_HELP -man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7) -DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ --stringparam variable.prefix OCF_RESKEY_ --param man.vol 7 +man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) \ + $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7) +DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ \ + --stringparam variable.prefix OCF_RESKEY_ \ + --param man.vol 7 ocf_pacemaker_%.xml: % $(AM_V_GEN)OCF_FUNCTIONS=/dev/null OCF_ROOT=$(OCF_ROOT_DIR) $(abs_builddir)/$< meta-data > $@ @@ -50,4 +53,6 @@ validate: all | xmllint --noout --relaxng $(RNG) - || break; \ done -CLEANFILES = $(man7_MANS) $(ocf_SCRIPTS:%=%.xml) $(dist_ocf_SCRIPTS:%=%.xml) +CLEANFILES = $(man7_MANS) \ + $(ocf_SCRIPTS:%=%.xml) \ + $(dist_ocf_SCRIPTS:%=%.xml) diff --git a/agents/ocf/ifspeed.in b/agents/ocf/ifspeed.in index 5fbaf89..8c07c3d 100755 --- a/agents/ocf/ifspeed.in +++ b/agents/ocf/ifspeed.in @@ -123,7 +123,7 @@ Can be used to tune how big attribute value will be. - + The time to wait (dampening) for further changes to occur. @@ -147,6 +147,7 @@ Log more verbosely. + END @@ -154,7 +155,7 @@ END usage() { cat <= 2.70) m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC]) -dnl C++ is not needed for build, just maintainer utilities -AC_PROG_CXX +# cc_supports_flag +# Return success if the C compiler supports the given flag +cc_supports_flag() { + local CFLAGS="-Werror $@" + AC_MSG_CHECKING([whether $CC supports $@]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], + [RC=0; AC_MSG_RESULT([yes])], + [RC=1; AC_MSG_RESULT([no])]) + return $RC +} + +# cc_temp_flags +# Use the given flags for subsequent C compilation. These can be reverted to +# what was used previously with cc_restore_flags. This allows certain tests to +# use specific flags without affecting anything else. +cc_temp_flags() { + ac_save_CFLAGS="$CFLAGS" + CFLAGS="$*" +} + +# cc_restore_flags +# Restore C compiler flags to what they were before the last cc_temp_flags +# call. +cc_restore_flags() { + CFLAGS=$ac_save_CFLAGS +} + +# Check for fatal warning support +AS_IF([test $enable_fatal_warnings -ne $DISABLED dnl + && test x"$GCC" = x"yes" && cc_supports_flag -Werror], + [WERROR="-Werror"], + [ + WERROR="" + AS_CASE([$enable_fatal_warnings], + [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], + [$OPTIONAL], [enable_fatal_warnings=$DISABLED]) + ]) dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that @@ -103,8 +121,15 @@ gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT -# --enable-new-dtags: Use RUNPATH instead of RPATH. -# It is necessary to have this done before libtool does linker detection. 
+AC_CHECK_SIZEOF(long) + + +dnl ============================================== +dnl Linker checks +dnl ============================================== + +# Check whether linker supports --enable-new-dtags to use RUNPATH instead of +# RPATH. It is necessary to do this before libtool does linker detection. # See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], @@ -117,65 +142,14 @@ LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) -AC_TYPE_SIZE_T -AC_CHECK_SIZEOF(char) -AC_CHECK_SIZEOF(short) -AC_CHECK_SIZEOF(int) -AC_CHECK_SIZEOF(long) -AC_CHECK_SIZEOF(long long) -dnl =============================================== -dnl Helpers -dnl =============================================== -cc_supports_flag() { - local CFLAGS="-Werror $@" - AC_MSG_CHECKING([whether $CC supports $@]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], - [RC=0; AC_MSG_RESULT([yes])], - [RC=1; AC_MSG_RESULT([no])]) - return $RC -} - -# Some tests need to use their own CFLAGS - -cc_temp_flags() { - ac_save_CFLAGS="$CFLAGS" - CFLAGS="$*" -} - -cc_restore_flags() { - CFLAGS=$ac_save_CFLAGS -} - -# expand_path_option $path_variable_name $default -expand_path_option() { - # The first argument is the variable *name* (not value) - ac_path_varname="$1" - - # Get the original value of the variable - ac_path_value=$(eval echo "\${${ac_path_varname}}") - - # Expand any literal variable expressions in the value so that we don't - # end up with something like '${prefix}' in #defines etc. - # - # Autoconf deliberately leaves values unexpanded to allow overriding - # the configure script choices in make commands (for example, - # "make exec_prefix=/foo install"). No longer being able to do this seems - # like no great loss. - eval ac_path_value=$(eval echo "${ac_path_value}") +dnl ============================================== +dnl Define configure options +dnl ============================================== - # Use (expanded) default if necessary - AS_IF([test x"${ac_path_value}" = x""], - [eval ac_path_value=$(eval echo "$2")]) - - # Require a full path - AS_CASE(["$ac_path_value"], - [/*], [eval ${ac_path_varname}="$ac_path_value"], - [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])] - ) -} - -# yes_no_try $user_response $default +# yes_no_try +# Map a yes/no/try user selection to $REQUIRED for yes, $DISABLED for no, and +# $OPTIONAL for try. DISABLED=0 REQUIRED=1 OPTIONAL=2 @@ -190,17 +164,9 @@ yes_no_try() { AC_MSG_ERROR([Invalid option value "$value"]) } -check_systemdsystemunitdir() { - AC_MSG_CHECKING([which system unit file directory to use]) - PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) - AC_MSG_RESULT([${systemdsystemunitdir}]) - test x"$systemdsystemunitdir" != x"" - return $? 
-} - # -# Fix the defaults of certain built-in variables so they can be used in our -# custom argument defaults +# Fix the defaults of certain built-in variables so they can be used in the +# defaults for our custom arguments # AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) @@ -234,12 +200,12 @@ AS_CASE([$libdir], AC_MSG_RESULT([$libdir]) ]) -dnl =============================================== -dnl Configure Options -dnl =============================================== +# Start a list of optional features this build supports +PCMK_FEATURES="" -dnl Actual library checks come later, but pkg-config can be used here to grab -dnl external values to use as defaults for configure options +dnl This section should include only the definition of configure script +dnl options and determining their values. Processing should be done later when +dnl possible, other than what's needed to determine values and defaults. dnl Per the autoconf docs, --enable-*/--disable-* options should control dnl features inherent to Pacemaker, while --with-*/--without-* options should @@ -299,13 +265,6 @@ AC_ARG_ENABLE([compat-2.0], ) yes_no_try "$enable_compat_2_0" "no" enable_compat_2_0=$? -AS_IF([test $enable_compat_2_0 -ne $DISABLED], - [ - AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1], - [Keep certain output compatible with 2.0 release series]) - PCMK_FEATURES="$PCMK_FEATURES compat-2.0" - ] -) # Add an option to create symlinks at the pre-2.0.0 daemon name locations, so # that users and tools can continue to invoke those names directly (e.g., for @@ -316,7 +275,6 @@ AC_ARG_ENABLE([legacy-links], ) yes_no_try "$enable_legacy_links" "no" enable_legacy_links=$? -AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED]) # AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults # to enabled. We override the definition of AM_NLS to flip the default and mark @@ -330,12 +288,9 @@ AC_DEFUN([AM_NLS], AC_MSG_RESULT([$USE_NLS]) AC_SUBST([USE_NLS])] ) - AM_GNU_GETTEXT([external]) AM_GNU_GETTEXT_VERSION([0.18]) -AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"]) - dnl --with-* options: external software support, and custom locations dnl This argument is defined via an M4 macro so default can be a variable @@ -348,31 +303,14 @@ AC_DEFUN([VERSION_ARG], ) VERSION_ARG(VERSION_NUMBER) -# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case -# the user used --with-version. Unfortunately, this can only affect the -# substitution variables and later uses in this file, not the config.h -# constants, so we have to be careful to use only PACEMAKER_VERSION in C code. 
-PACKAGE_VERSION=$PACEMAKER_VERSION -VERSION=$PACEMAKER_VERSION - -# Detect highest API schema version (use git if available to list managed RNGs, -# in case there are leftover schema files from an earlier build of a different -# version, otherwise check all RNGs) -API_VERSION=$({ git ls-files xml/api/*.rng 2>/dev/null || ls -1 xml/api/*.rng ; } dnl - | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1) -AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"], - [Highest API schema version]) - -# Re-run configure at next make if any RNG changes, to re-detect highest -AC_SUBST([CONFIG_STATUS_DEPENDENCIES], - [$(echo '$(wildcard $(top_srcdir)/xml/api/*.rng)')]) - CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) +AS_IF([test x"${CRM_DAEMON_USER}" = x""], + [CRM_DAEMON_USER="hacluster"]) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], @@ -380,6 +318,8 @@ AC_ARG_WITH([daemon-group], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) +AS_IF([test x"${CRM_DAEMON_GROUP}" = x""], + [CRM_DAEMON_GROUP="haclient"]) BUG_URL="" AC_ARG_WITH([bug-url], @@ -388,6 +328,8 @@ AC_ARG_WITH([bug-url], @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))], [ BUG_URL="$withval" ] ) +AS_IF([test x"${BUG_URL}" = x""], + [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"]) dnl --with-* options: features @@ -422,9 +364,6 @@ AS_CASE([$with_concurrent_fencing_default], [true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"], [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])] ) -AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT], - ["$with_concurrent_fencing_default"], - [Default value for concurrent-fencing cluster option]) AC_ARG_WITH([sbd-sync-default], [AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([ @@ -437,9 +376,6 @@ AS_CASE([$with_sbd_sync_default], [true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"], [AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])] ) -AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT], - [$with_sbd_sync_default], - [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable]) AC_ARG_WITH([resource-stickiness-default], [AS_HELP_STRING([--with-resource-stickiness-default], @@ -451,9 +387,6 @@ AS_CASE([$with_resource_stickiness_default], [*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])], [PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"] ) -AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT], - [$with_resource_stickiness_default], - [Default value for resource-stickiness resource meta-attribute]) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], @@ -462,7 +395,7 @@ AC_ARG_WITH([corosync], yes_no_try "$with_corosync" "try" with_corosync=$? -dnl Get default from corosync if possible. 
+dnl Get default from Corosync if possible PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir], [PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"], [PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"]) @@ -553,10 +486,8 @@ AC_ARG_WITH([ocfdir], /usr/lib/ocf@:>@]))], [ OCF_ROOT_DIR="$withval" ] ) -AC_SUBST(OCF_ROOT_DIR) -AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"], - [OCF root directory for resource agents and libraries]) +dnl Get default from resource-agents if possible PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [], [OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"]) AC_ARG_WITH([ocfrapath], @@ -566,7 +497,6 @@ AC_ARG_WITH([ocfrapath], OCFDIR/resource.d@:>@]))], [ OCF_RA_PATH="$withval" ] ) -AC_SUBST(OCF_RA_PATH) OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d" AC_ARG_WITH([ocfrainstalldir], @@ -575,7 +505,6 @@ AC_ARG_WITH([ocfrainstalldir], @<:@OCFDIR/resource.d@:>@]))], [ OCF_RA_INSTALL_DIR="$withval" ] ) -AC_SUBST(OCF_RA_INSTALL_DIR) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], @@ -587,7 +516,6 @@ AC_ARG_WITH([fence-bindir], package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) -AC_SUBST(PCMK__FENCE_BINDIR) dnl --with-* options: non-production testing @@ -620,31 +548,198 @@ AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executabl AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) -dnl =============================================== -dnl General Processing -dnl =============================================== +dnl ============================================== +dnl Locate essential tools +dnl ============================================== -AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION", - [Version number of this Pacemaker build]) +PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" +export PATH -PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'` -AC_SUBST(PACKAGE_SERIES) +dnl Pacemaker's executable python scripts will invoke the python specified by +dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a +dnl built-in list with (unversioned) "python" having precedence. To configure +dnl Pacemaker to use a specific python interpreter version, define PYTHON +dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 + +dnl If PYTHON was specified, ensure it is an absolute path +AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) + +dnl Require a minimum Python version +AM_PATH_PYTHON([3.4]) AC_PROG_LN_S AC_PROG_MKDIR_P -# Check for fatal warning support -AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror], - [WERROR="-Werror"], +AC_PATH_PROG([GIT], [git], [false]) + +dnl Bash is needed for building man pages and running regression tests. +dnl We set "BASH_PATH" because "BASH" is already an environment variable. +REQUIRE_PROG([BASH_PATH], [bash]) + +AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) +AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) + + +dnl ============================================== +dnl Package and schema versioning +dnl ============================================== + +# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case +# the user used --with-version. 
Unfortunately, this can only affect the +# substitution variables and later uses in this file, not the config.h +# constants, so we have to be careful to use only PACEMAKER_VERSION in C code. +PACKAGE_VERSION=$PACEMAKER_VERSION +VERSION=$PACEMAKER_VERSION + +AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION", + [Version number of this Pacemaker build]) + +AC_MSG_CHECKING([build version]) +AS_IF([test "$GIT" != "false" && test -d .git], [ - WERROR="" - AS_CASE([$enable_fatal_warnings], - [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], - [$OPTIONAL], [ - AC_MSG_NOTICE([Compiler does not support fatal warnings]) - enable_fatal_warnings=$DISABLED - ]) + BUILD_VERSION=`"$GIT" log --pretty="format:%h" -n 1` + AC_MSG_RESULT([$BUILD_VERSION (git hash)]) + ], + [ + # The current directory name make a reasonable default + # Most generated archives will include the hash or tag + BASE=`basename $PWD` + BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` + AC_MSG_RESULT([$BUILD_VERSION (directory name)]) ]) +AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) +AC_SUBST(BUILD_VERSION) + +# schema_files +# List all manually edited RNG schemas (as opposed to auto-generated via make) +# in the given directory. Use git if available to list managed RNGs, in case +# there are leftover schema files from an earlier build of a different +# version. Otherwise, check all RNGs. +schema_files() { + local files="$("$GIT" ls-files "$1"/*.rng 2>/dev/null)" + + AS_IF([test x"$files" = x""], + [ + files="$(ls -1 "$1"/*.rng | grep -E -v \ + '/(pacemaker|api-result|crm_mon|versions)[^/]*\.rng')" + ]) + echo "$files" +} + +# latest_schema_version +# Determine highest RNG version in the given schema directory. +latest_schema_version() { + schema_files "$1" | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' dnl + | sort -V | tail -1 +} + +# schemas_for_make +# Like schema_files, but suitable for use in make variables. 
+schemas_for_make() { + local file + + for file in $(schema_files "$1"); do + AS_ECHO_N(["\$(top_srcdir)/$file "]) + done +} + +# Detect highest API schema version +API_VERSION=$(latest_schema_version "xml/api") +AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"], + [Highest API schema version]) + +# Detect highest CIB schema version +CIB_VERSION=$(latest_schema_version "xml") +AC_SUBST(CIB_VERSION) + +# Re-run configure at next make if schema files change, to re-detect versions +cib_schemas="$(schemas_for_make "xml")" +api_schemas="$(schemas_for_make "xml/api")" +CONFIG_STATUS_DEPENDENCIES="$cib_schemas $api_schemas" +AC_SUBST(CONFIG_STATUS_DEPENDENCIES) + + +dnl ============================================== +dnl Process simple options +dnl ============================================== + +AS_IF([test $enable_compat_2_0 -ne $DISABLED], + [ + AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1], + [Keep certain output compatible with 2.0 release series]) + PCMK_FEATURES="$PCMK_FEATURES compat-2.0" + ] +) + +AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED]) + +AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"]) + +AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT], + ["$with_concurrent_fencing_default"], + [Default value for concurrent-fencing cluster option]) + +AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT], + [$with_sbd_sync_default], + [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable]) + +AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT], + [$with_resource_stickiness_default], + [Default value for resource-stickiness resource meta-attribute]) + +AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [], + [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) +AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], + [GnuTLS cipher priorities]) +AC_SUBST(PCMK_GNUTLS_PRIORITIES) + +AC_SUBST(BUG_URL) +AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"], + [Where bugs should be reported]) + +AC_DEFINE_UNQUOTED([CRM_DAEMON_USER], ["$CRM_DAEMON_USER"], + [User to run Pacemaker daemons as]) +AC_SUBST(CRM_DAEMON_USER) + +AC_DEFINE_UNQUOTED([CRM_DAEMON_GROUP], ["$CRM_DAEMON_GROUP"], + [Group to run Pacemaker daemons as]) +AC_SUBST(CRM_DAEMON_GROUP) + + +dnl ============================================== +dnl Process file paths +dnl ============================================== + +# expand_path_option [] +# Given the name of a file path variable, expand any variable references +# inside it, use the specified default if it is not specified, and ensure it +# is a full path. +expand_path_option() { + # The first argument is the variable *name* (not value) + ac_path_varname="$1" + + # Get the original value of the variable + ac_path_value=$(eval echo "\${${ac_path_varname}}") + + # Expand any literal variable expressions in the value so that we don't + # end up with something like '${prefix}' in #defines etc. + # + # Autoconf deliberately leaves values unexpanded to allow overriding + # the configure script choices in make commands (for example, + # "make exec_prefix=/foo install"). No longer being able to do this seems + # like no great loss. 
+ eval ac_path_value=$(eval echo "${ac_path_value}") + + # Use (expanded) default if necessary + AS_IF([test x"${ac_path_value}" = x""], + [eval ac_path_value=$(eval echo "$2")]) + + # Require a full path + AS_CASE(["$ac_path_value"], + [/*], [eval ${ac_path_varname}="$ac_path_value"], + [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])] + ) +} AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}]) AS_CASE([$INITDIR], @@ -670,6 +765,7 @@ expand_path_option exec_prefix expand_path_option bindir expand_path_option sbindir expand_path_option libexecdir +expand_path_option datarootdir expand_path_option datadir expand_path_option sysconfdir expand_path_option sharedstatedir @@ -680,10 +776,13 @@ expand_path_option oldincludedir expand_path_option infodir expand_path_option mandir -dnl Home-grown variables +AC_DEFUN([AC_DATAROOTDIR_CHECKED]) + +dnl Expand values of custom directory options expand_path_option localedir "${datadir}/locale" -AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs]) +AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], + [Base directory for message catalogs]) AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"]) expand_path_option runstatedir "${localstatedir}/run" @@ -705,33 +804,88 @@ expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf" AC_SUBST(PCMK__COROSYNC_CONF) expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker" -AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) +AC_DEFINE_UNQUOTED([CRM_LOG_DIR], ["$CRM_LOG_DIR"], + [Location for Pacemaker log file]) AC_SUBST(CRM_LOG_DIR) expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles" -AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) +AC_DEFINE_UNQUOTED([CRM_BUNDLE_DIR], ["$CRM_BUNDLE_DIR"], + [Location for Pacemaker bundle logs]) AC_SUBST(CRM_BUNDLE_DIR) expand_path_option PCMK__FENCE_BINDIR -AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", +AC_SUBST(PCMK__FENCE_BINDIR) +AC_DEFINE_UNQUOTED([PCMK__FENCE_BINDIR], ["$PCMK__FENCE_BINDIR"], [Location for executable fence agents]) +expand_path_option OCF_ROOT_DIR +AC_SUBST(OCF_ROOT_DIR) +AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"], + [OCF root directory for resource agents and libraries]) + expand_path_option OCF_RA_PATH +AC_SUBST(OCF_RA_PATH) AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"], [OCF directories to search for resource agents ]) -AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [], - [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) -AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], - [GnuTLS cipher priorities]) -AC_SUBST(PCMK_GNUTLS_PRIORITIES) +expand_path_option OCF_RA_INSTALL_DIR +AC_SUBST(OCF_RA_INSTALL_DIR) -AS_IF([test x"${BUG_URL}" = x""], - [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"]) -AC_SUBST(BUG_URL) -AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"], - [Where bugs should be reported]) +# Derived paths + +CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" +AC_DEFINE_UNQUOTED([CRM_SCHEMA_DIRECTORY], ["$CRM_SCHEMA_DIRECTORY"], + [Location for the Pacemaker Relax-NG Schema]) +AC_SUBST(CRM_SCHEMA_DIRECTORY) +CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" +AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], + [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) +AC_SUBST(CRM_CORE_DIR) + 
+CRM_PACEMAKER_DIR="${localstatedir}/lib/pacemaker" +AC_DEFINE_UNQUOTED([CRM_PACEMAKER_DIR], ["$CRM_PACEMAKER_DIR"], + [Location to store directory produced by Pacemaker daemons]) +AC_SUBST(CRM_PACEMAKER_DIR) + +CRM_BLACKBOX_DIR="${localstatedir}/lib/pacemaker/blackbox" +AC_DEFINE_UNQUOTED([CRM_BLACKBOX_DIR], ["$CRM_BLACKBOX_DIR"], + [Where to keep blackbox dumps]) +AC_SUBST(CRM_BLACKBOX_DIR) + +PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" +AC_DEFINE_UNQUOTED([PE_STATE_DIR], ["$PE_STATE_DIR"], + [Where to keep scheduler outputs]) +AC_SUBST(PE_STATE_DIR) + +CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" +AC_DEFINE_UNQUOTED([CRM_CONFIG_DIR], ["$CRM_CONFIG_DIR"], + [Where to keep configuration files]) +AC_SUBST(CRM_CONFIG_DIR) + +CRM_DAEMON_DIR="${libexecdir}/pacemaker" +AC_DEFINE_UNQUOTED([CRM_DAEMON_DIR], ["$CRM_DAEMON_DIR"], + [Location for Pacemaker daemons]) +AC_SUBST(CRM_DAEMON_DIR) + +CRM_STATE_DIR="${runstatedir}/crm" +AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], + [Where to keep state files and sockets]) +AC_SUBST(CRM_STATE_DIR) + +CRM_RSCTMP_DIR="${runstatedir}/resource-agents" +AC_DEFINE_UNQUOTED([CRM_RSCTMP_DIR], ["$CRM_RSCTMP_DIR"], + [Where resource agents should keep state files]) +AC_SUBST(CRM_RSCTMP_DIR) + +PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" +AC_DEFINE_UNQUOTED([PACEMAKER_CONFIG_DIR], ["$PACEMAKER_CONFIG_DIR"], + [Where to keep configuration files like authkey]) +AC_SUBST(PACEMAKER_CONFIG_DIR) + +AC_DEFINE_UNQUOTED([SBIN_DIR], ["$sbindir"], [Location for system binaries]) + +# Warn about any directories that don't exist (which may be OK) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR localedir @@ -741,6 +895,10 @@ do [AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])]) done +dnl =============================================== +dnl General Processing +dnl =============================================== + us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ @@ -786,6 +944,10 @@ AS_IF([test -z "${us_auth}"], [ dnl OS-based decision-making is poor autotools practice; feature-based dnl mechanisms are strongly preferred. Keep this section to a bare minimum; dnl regard as a "necessary evil". + +dnl Set host_os and host_cpu +AC_CANONICAL_HOST + INIT_EXT="" PROCFS=0 dnl Solaris and some *BSD versions support procfs but not files we need @@ -810,24 +972,10 @@ AS_CASE(["$host_cpu"], ]) ]) -dnl =============================================== -dnl Program Paths -dnl =============================================== - -PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" -export PATH - -dnl Pacemaker's executable python scripts will invoke the python specified by -dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a -dnl built-in list with (unversioned) "python" having precedence. 
To configure -dnl Pacemaker to use a specific python interpreter version, define PYTHON -dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 - -dnl Ensure PYTHON is an absolute path -AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) -dnl Require a minimum Python version -AM_PATH_PYTHON([3.4]) +dnl ============================================== +dnl Documentation build dependencies and checks +dnl ============================================== AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) @@ -836,15 +984,6 @@ AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) -dnl Bash is needed for building man pages and running regression tests. -dnl BASH is already an environment variable, so use something else. -AC_PATH_PROG([BASH_PATH], [bash]) -AS_IF([test x"${BASH_PATH}" != x""], [], - [AC_MSG_FAILURE([Could not find required build tool bash])]) - -AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) -AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) - AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") AS_IF([test x"${HELP2MAN}" != x""], [PCMK_FEATURES="$PCMK_FEATURES generated-manpages"]) @@ -913,28 +1052,6 @@ AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], ]) AC_SUBST([GETOPT_PATH]) -dnl ======================================================================== -dnl checks for library functions to replace them -dnl -dnl NoSuchFunctionName: -dnl is a dummy function which no system supplies. It is here to make -dnl the system compile semi-correctly on OpenBSD which doesn't know -dnl how to create an empty archive -dnl -dnl scandir: Only on BSD. -dnl System-V systems may have it, but hidden and/or deprecated. -dnl A replacement function is supplied for it. -dnl -dnl strerror: returns a string that corresponds to an errno. -dnl A replacement function is supplied for it. -dnl -dnl strnlen: is a gnu function similar to strlen, but safer. -dnl We wrote a tolerably-fast replacement function for it. -dnl -dnl strndup: is a gnu function similar to strdup, but safer. -dnl We wrote a tolerably-fast replacement function for it. 
- -AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup) dnl =============================================== dnl Libraries @@ -973,12 +1090,24 @@ AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"], [LIBADD_DL=-ldl], [LIBADD_DL=${lt_cv_dlopen_libs}]) -PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], +PKG_CHECK_MODULES(LIBXML2, [libxml-2.0 >= 2.6.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) REQUIRE_LIB([xslt], [xsltApplyStylesheet]) +AC_MSG_CHECKING([whether __progname and __progname_full are available]) +AC_LINK_IFELSE([AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]], + [[__progname = "foo"; + __progname_full = "foo bar";]])], + [ + have_progname="yes" + AC_DEFINE(HAVE_PROGNAME, 1, + [Define to 1 if processes can change their name]) + ], + [have_progname="no"]) +AC_MSG_RESULT([$have_progname]) + dnl ======================================================================== dnl Headers dnl ======================================================================== @@ -1000,18 +1129,30 @@ AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h]) REQUIRE_HEADER([arpa/inet.h]) REQUIRE_HEADER([ctype.h]) REQUIRE_HEADER([dirent.h]) +REQUIRE_HEADER([dlfcn.h]) REQUIRE_HEADER([errno.h]) +REQUIRE_HEADER([fcntl.h]) +REQUIRE_HEADER([float.h]) REQUIRE_HEADER([glib.h]) REQUIRE_HEADER([grp.h]) +REQUIRE_HEADER([inttypes.h]) +REQUIRE_HEADER([libgen.h]) REQUIRE_HEADER([limits.h]) +REQUIRE_HEADER([locale.h]) REQUIRE_HEADER([netdb.h]) REQUIRE_HEADER([netinet/in.h]) REQUIRE_HEADER([netinet/ip.h], [ #include #include ]) +REQUIRE_HEADER([netinet/tcp.h]) REQUIRE_HEADER([pwd.h]) +REQUIRE_HEADER([regex.h]) +REQUIRE_HEADER([sched.h]) REQUIRE_HEADER([signal.h]) +REQUIRE_HEADER([stdarg.h]) +REQUIRE_HEADER([stdbool.h]) +REQUIRE_HEADER([stdint.h]) REQUIRE_HEADER([stdio.h]) REQUIRE_HEADER([stdlib.h]) REQUIRE_HEADER([string.h]) @@ -1024,8 +1165,10 @@ REQUIRE_HEADER([sys/socket.h]) REQUIRE_HEADER([sys/stat.h]) REQUIRE_HEADER([sys/time.h]) REQUIRE_HEADER([sys/types.h]) +REQUIRE_HEADER([sys/uio.h]) REQUIRE_HEADER([sys/utsname.h]) REQUIRE_HEADER([sys/wait.h]) +REQUIRE_HEADER([termios.h]) REQUIRE_HEADER([time.h]) REQUIRE_HEADER([unistd.h]) REQUIRE_HEADER([libxml/xpath.h]) @@ -1033,21 +1176,6 @@ REQUIRE_HEADER([libxslt/xslt.h]) cc_restore_flags -AC_CHECK_FUNCS([uuid_unparse], [], - [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])]) - -AC_CACHE_CHECK([whether __progname and __progname_full are available], - [pf_cv_var_progname], - [AC_LINK_IFELSE( - [AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]], - [[__progname = "foo"; __progname_full = "foo bar";]])], - [pf_cv_var_progname="yes"], - [pf_cv_var_progname="no"] - )] - ) -AS_IF([test x"$pf_cv_var_progname" = x"yes"], - [AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])]) - dnl ======================================================================== dnl Generic declarations dnl ======================================================================== @@ -1101,25 +1229,42 @@ dnl ======================================================================== dnl Functions dnl ======================================================================== +REQUIRE_FUNC([alphasort]) REQUIRE_FUNC([getopt]) +REQUIRE_FUNC([scandir]) REQUIRE_FUNC([setenv]) +REQUIRE_FUNC([strndup]) +REQUIRE_FUNC([strnlen]) REQUIRE_FUNC([unsetenv]) +REQUIRE_FUNC([uuid_unparse]) REQUIRE_FUNC([vasprintf]) -AC_CACHE_CHECK(whether sscanf supports %m, - pf_cv_var_sscanf, - AC_RUN_IFELSE([AC_LANG_SOURCE([[ 
-#include -const char *s = "some-command-line-arg"; -int main(int argc, char **argv) { -char *name = NULL; -int n = sscanf(s, "%ms", &name); -return n == 1 ? 0 : 1; -} -]])], - pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) - -AS_IF([test x"$pf_cv_var_sscanf" = x"yes"], +AC_CHECK_FUNCS([strchrnul]) + +AC_CHECK_FUNCS([fopen64]) +AM_CONDITIONAL([WRAPPABLE_FOPEN64], [test x"$ac_cv_func_fopen64" = x"yes"]) + +AC_MSG_CHECKING([whether strerror always returns non-NULL]) +AC_RUN_IFELSE([AC_LANG_PROGRAM([[ + #include + #include + ]], [[ + return strerror(-1) == NULL; + ]])], + [AC_MSG_RESULT([yes])], + [AC_MSG_ERROR([strerror() is not C99-compliant])], + [AC_MSG_ERROR([strerror() is not C99-compliant])]) + +AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include ]], [[ + const char *s = "some-command-line-arg"; + char *name = NULL; + int n = sscanf(s, "%ms", &name); + return n != 1; + ]])], + [have_sscanf_m="yes"], + [have_sscanf_m="no"], + [have_sscanf_m="no"]) +AS_IF([test x"$have_sscanf_m" = x"yes"], [AC_DEFINE([HAVE_SSCANF_M], [1], [Define to 1 if sscanf %m modifier is available])]) @@ -1308,84 +1453,10 @@ AC_CHECK_HEADERS([stonith/stonith.h], ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"]) + dnl =============================================== -dnl Variables needed for substitution +dnl Detect DBus, systemd, and Upstart support dnl =============================================== -CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" -AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) -AC_SUBST(CRM_SCHEMA_DIRECTORY) - -CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" -AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], - [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) -AC_SUBST(CRM_CORE_DIR) - -AS_IF([test x"${CRM_DAEMON_USER}" = x""], - [CRM_DAEMON_USER="hacluster"]) -AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) -AC_SUBST(CRM_DAEMON_USER) - -AS_IF([test x"${CRM_DAEMON_GROUP}" = x""], - [CRM_DAEMON_GROUP="haclient"]) -AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) -AC_SUBST(CRM_DAEMON_GROUP) - -CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker -AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) -AC_SUBST(CRM_PACEMAKER_DIR) - -CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox -AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) -AC_SUBST(CRM_BLACKBOX_DIR) - -PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" -AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) -AC_SUBST(PE_STATE_DIR) - -CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" -AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) -AC_SUBST(CRM_CONFIG_DIR) - -CRM_DAEMON_DIR="${libexecdir}/pacemaker" -AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) -AC_SUBST(CRM_DAEMON_DIR) - -CRM_STATE_DIR="${runstatedir}/crm" -AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], - [Where to keep state files and sockets]) -AC_SUBST(CRM_STATE_DIR) - -CRM_RSCTMP_DIR="${runstatedir}/resource-agents" -AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) -AC_SUBST(CRM_RSCTMP_DIR) - -PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" 
-AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) -AC_SUBST(PACEMAKER_CONFIG_DIR) - -AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries]) - -AC_PATH_PROGS(GIT, git false) - -AC_MSG_CHECKING([build version]) -BUILD_VERSION=6fdc9deea29 -AS_IF([test $BUILD_VERSION != ":%h$"], - [AC_MSG_RESULT([$BUILD_VERSION (archive hash)])], - [test -x $GIT && test -d .git], - [ - BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` - AC_MSG_RESULT([$BUILD_VERSION (git hash)]) - ], - [ - # The current directory name make a reasonable default - # Most generated archives will include the hash or tag - BASE=`basename $PWD` - BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` - AC_MSG_RESULT([$BUILD_VERSION (directory name)]) - ]) - -AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) -AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], @@ -1400,6 +1471,14 @@ AS_IF([test $HAVE_dbus = 0], [PC_NAME_DBUS="dbus-1"]) AC_SUBST(PC_NAME_DBUS) +check_systemdsystemunitdir() { + AC_MSG_CHECKING([which system unit file directory to use]) + PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) + AC_MSG_RESULT([${systemdsystemunitdir}]) + test x"$systemdsystemunitdir" != x"" + return $? +} + AS_CASE([$enable_systemd], [$REQUIRED], [ AS_IF([test $HAVE_dbus = 0], @@ -1489,6 +1568,11 @@ AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart], AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED]) AC_SUBST(SUPPORT_UPSTART) + +dnl ======================================================================== +dnl Detect Nagios support +dnl ======================================================================== + AS_CASE([$with_nagios], [$REQUIRED], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], @@ -1977,19 +2061,17 @@ CONFIG_FILES_EXEC([agents/ocf/ClusterMon], [agents/ocf/remote], [agents/stonith/fence_legacy], [agents/stonith/fence_watchdog], + [cts/cluster_test], + [cts/cts], [cts/cts-attrd], [cts/cts-cli], [cts/cts-exec], [cts/cts-fencing], + [cts/cts-lab], + [cts/cts-log-watcher], [cts/cts-regression], [cts/cts-scheduler], - [cts/lxc_autogen.sh], [cts/benchmark/clubench], - [cts/lab/CTSlab.py], - [cts/lab/OCFIPraTest.py], - [cts/lab/cluster_test], - [cts/lab/cts], - [cts/lab/cts-log-watcher], [cts/support/LSBDummy], [cts/support/cts-support], [cts/support/fence_dummy], @@ -1998,13 +2080,13 @@ CONFIG_FILES_EXEC([agents/ocf/ClusterMon], [maint/bumplibs], [tools/cluster-clean], [tools/cluster-helper], - [tools/cluster-init], [tools/crm_failcount], [tools/crm_master], [tools/crm_report], [tools/crm_standby], [tools/cibsecret], - [tools/pcmk_simtimes]) + [tools/pcmk_simtimes], + [xml/version-diff.sh]) dnl Other files we output AC_CONFIG_FILES(Makefile \ @@ -2014,7 +2096,6 @@ AC_CONFIG_FILES(Makefile \ agents/stonith/Makefile \ cts/Makefile \ cts/benchmark/Makefile \ - cts/lab/Makefile \ cts/scheduler/Makefile \ cts/scheduler/dot/Makefile \ cts/scheduler/exp/Makefile \ @@ -2059,6 +2140,7 @@ AC_CONFIG_FILES(Makefile \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/acl/Makefile \ + lib/common/tests/actions/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ @@ -2067,7 +2149,6 @@ AC_CONFIG_FILES(Makefile \ lib/common/tests/iso8601/Makefile \ lib/common/tests/lists/Makefile \ lib/common/tests/nvpair/Makefile \ - lib/common/tests/operations/Makefile \ lib/common/tests/options/Makefile \ 
lib/common/tests/output/Makefile \ lib/common/tests/procfs/Makefile \ @@ -2104,9 +2185,9 @@ AC_CONFIG_FILES(Makefile \ python/setup.py \ python/pacemaker/Makefile \ python/pacemaker/_cts/Makefile \ + python/pacemaker/_cts/tests/Makefile \ python/pacemaker/buildoptions.py \ python/tests/Makefile \ - replace/Makefile \ rpm/Makefile \ tests/Makefile \ tools/Makefile \ diff --git a/cts/Makefile.am b/cts/Makefile.am index a2e6738..598ae32 100644 --- a/cts/Makefile.am +++ b/cts/Makefile.am @@ -12,42 +12,29 @@ MAINTAINERCLEANFILES = Makefile.in # Test commands and globally applicable test files should be in $(testdir), # and command-specific test data should be in a command-specific subdirectory. testdir = $(datadir)/$(PACKAGE)/tests -test_SCRIPTS = cts-attrd \ +test_SCRIPTS = cts-attrd \ cts-cli \ cts-exec \ cts-fencing \ + cts-lab \ cts-regression \ cts-scheduler dist_test_DATA = README.md \ valgrind-pcmk.suppressions -ctsdir = $(testdir)/cts -cts_SCRIPTS = lxc_autogen.sh - clidir = $(testdir)/cli -dist_cli_DATA = cli/constraints.xml \ - cli/crmadmin-cluster-remote-guest-nodes.xml \ - cli/crm_diff_new.xml \ - cli/crm_diff_old.xml \ - cli/crm_mon.xml \ - cli/crm_mon-feature_set.xml \ - cli/crm_mon-partial.xml \ - cli/crm_mon-rsc-maint.xml \ - cli/crm_mon-T180.xml \ - cli/crm_mon-unmanaged.xml \ - cli/crm_resource_digests.xml \ - cli/regression.acls.exp \ - cli/regression.crm_mon.exp \ - cli/regression.daemons.exp \ - cli/regression.dates.exp \ - cli/regression.error_codes.exp \ - cli/regression.feature_set.exp \ - cli/regression.rules.exp \ - cli/regression.tools.exp \ - cli/regression.upgrade.exp \ - cli/regression.validity.exp \ - cli/regression.access_render.exp +dist_cli_DATA = $(wildcard cli/*.xml cli/*.exp) + +ctsdir = $(datadir)/$(PACKAGE)/tests/cts +cts_SCRIPTS = cts + +# Commands intended to be run only via other commands +halibdir = $(CRM_DAEMON_DIR) +dist_halib_SCRIPTS = cts-log-watcher +noinst_SCRIPTS = cluster_test + +.PHONY: scheduler-list scheduler-list: @for T in "$(srcdir)"/scheduler/xml/*.xml; do \ echo $$(basename $$T .xml); \ @@ -55,15 +42,35 @@ scheduler-list: CLEANFILES = $(builddir)/.regression.failed.diff +.PHONY: clean-local clean-local: rm -f scheduler/*/*.pe -SUBDIRS = benchmark lab scheduler support +SUBDIRS = benchmark \ + scheduler \ + support +.PHONY: cts-support-install cts-support-install: $(MAKE) $(AM_MAKEFLAGS) -C support cts-support $(builddir)/support/cts-support install +.PHONY: cts-support-uninstall cts-support-uninstall: $(MAKE) $(AM_MAKEFLAGS) -C support cts-support $(builddir)/support/cts-support uninstall + +# Everything listed here is a python script, typically generated from a .in file +# (though that is not a requirement). We want to run pylint on all of these +# things after they've been built. +python_files = cts-attrd \ + cts-exec \ + cts-fencing \ + cts-lab \ + cts-log-watcher \ + cts-regression \ + cts-scheduler + +.PHONY: pylint +pylint: $(python_files) + PYTHONPATH=$(top_builddir)/python pylint --rcfile $(top_srcdir)/python/pylintrc $(python_files) diff --git a/cts/README.md b/cts/README.md index 0ff1065..cbf319a 100644 --- a/cts/README.md +++ b/cts/README.md @@ -21,11 +21,10 @@ CTS includes: * The CTS lab: This is a cluster exerciser for intensively testing the behavior of an entire working cluster. It is primarily for developers and packagers of the Pacemaker source code, but it can be useful for users who wish to see how - their cluster will react to various situations. 
In an installed deployment, - the CTS lab is in the cts subdirectory of this directory; in a source - distibution, it is in cts/lab. + their cluster will react to various situations. Most of the lab code is in + the Pacemaker Python module. The front end, cts-lab, is in this directory. - The CTS lab runs a randomized series of predefined tests on the cluster. CTS + The CTS lab runs a randomized series of predefined tests on the cluster. It can be run against a pre-existing cluster configuration or overwrite the existing configuration with a test configuration. @@ -46,15 +45,13 @@ CTS includes: /usr/libexec/pacemaker/cts-support uninstall + (The actual directory location may vary depending on how Pacemaker was + built.) + * Cluster benchmark: The benchmark subdirectory of this directory contains some cluster test environment benchmarking code. It is not particularly useful for end users. -* LXC generator: The lxc\_autogen.sh script can be used to create some guest - nodes for testing using LXC containers. It is not particularly useful for end - users. In an installed deployment, it is in the cts subdirectory of this - directory; in a source distribution, it is in this directory. - * Valgrind suppressions: When memory-testing Pacemaker code with valgrind, various bugs in non-Pacemaker libraries and such can clutter the results. The valgrind-pcmk.suppressions file in this directory can be used with valgrind's @@ -109,9 +106,11 @@ CTS includes: ### Run -The primary interface to the CTS lab is the CTSlab.py executable: +The primary interface to the CTS lab is the cts-lab executable: - /usr/share/pacemaker/tests/cts/CTSlab.py [options] + /usr/share/pacemaker/tests/cts-lab [options] + +(The actual directory location may vary depending on how Pacemaker was built.) As part of the options, specify the cluster nodes with --nodes, for example: @@ -138,13 +137,13 @@ Configure some sort of fencing, for example to use fence\_xvm: Putting all the above together, a command line might look like: - /usr/share/pacemaker/tests/cts/CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" \ + /usr/share/pacemaker/tests/cts-lab --nodes "pcmk-1 pcmk-2 pcmk-3" \ --outputfile ~/cts.log --clobber-cib --populate-resources \ --test-ip-base 192.168.9.100 --stonith xvm 50 For more options, run with the --help option. -There are also a couple of wrappers for CTSlab.py that some users may find more +There are also a couple of wrappers for cts-lab that some users may find more convenient: cts, which is typically installed in the same place as the rest of the testing code; and cluster\_test, which is in the source directory and typically not installed. @@ -172,7 +171,7 @@ setting the following environment variables on all cluster nodes: --gen-suppressions=all" If running the CTS lab with valgrind enabled on the cluster nodes, add these -options to CTSlab.py: +options to cts-lab: --valgrind-tests --valgrind-procs "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-schedulerd pacemaker-fenced" @@ -217,22 +216,22 @@ lab, but the C library variables may be set differently on different nodes. ### Optional: Remote node testing -If the pacemaker-remoted daemon is installed on all cluster nodes, CTS will -enable remote node tests. +If the pacemaker-remoted daemon is installed on all cluster nodes, the CTS lab +will enable remote node tests. The remote node tests choose a random node, stop the cluster on it, start pacemaker-remoted on it, and add an ocf:pacemaker:remote resource to turn it -into a remote node. 
When the test is done, CTS will turn the node back into +into a remote node. When the test is done, the lab will turn the node back into a cluster node. -To avoid conflicts, CTS will rename the node, prefixing the original node name -with "remote-". For example, "pcmk-1" will become "remote-pcmk-1". These names -do not need to be resolvable. +To avoid conflicts, the lab will rename the node, prefixing the original node +name with "remote-". For example, "pcmk-1" will become "remote-pcmk-1". These +names do not need to be resolvable. The name change may require special fencing configuration, if the fence agent expects the node name to be the same as its hostname. A common approach is to specify the "remote-" names in pcmk\_host\_list. If you use -pcmk\_host\_list=all, CTS will expand that to all cluster nodes and their +pcmk\_host\_list=all, the lab will expand that to all cluster nodes and their "remote-" names. You may additionally need a pcmk\_host\_map argument to map the "remote-" names to the hostnames. Example: @@ -267,34 +266,9 @@ valgrind. For example: EOF -### Optional: Container testing - -If the --container-tests option is given to CTSlab.py, it will enable -testing of LXC resources (currently only the RemoteLXC test, -which starts a remote node using an LXC container). - -The container tests have additional package dependencies (see the toplevel -INSTALL.md). Also, SELinux must be enabled (in either permissive or enforcing -mode), libvirtd must be enabled and running, and root must be able to ssh -without a password between all cluster nodes (not just from the exerciser). -Before running the tests, you can verify your environment with: - - /usr/share/pacemaker/tests/cts/lxc_autogen.sh -v - -LXC tests will create two containers with hardcoded parameters: a NAT'ed bridge -named virbr0 using the IP network 192.168.123.0/24 will be created on the -cluster node hosting the containers; the host will be assigned -52:54:00:A8:12:35 as the MAC address and 192.168.123.1 as the IP address. -Each container will be assigned a random MAC address starting with 52:54:, -the IP address 192.168.123.11 or 192.168.123.12, the hostname lxc1 or lxc2 -(which will be added to the host's /etc/hosts file), and 196MB RAM. - -The test will revert all of the configuration when it is done. - - ### Mini-HOWTO: Allow passwordless remote SSH connections -The CTS scripts run "ssh -l root" so you don't have to do any of your testing +The CTS lab runs "ssh -l root" so you don't have to do any of your testing logged in as root on the exerciser. Here is how to allow such connections without requiring a password to be entered each time: @@ -328,42 +302,20 @@ without requiring a password to be entered each time: If not, look at the documentation for your version of ssh. -## Note on the maintenance +## Upgrading scheduler test inputs for new XSLTs -### Tests for scheduler - -The source `*.xml` files are preferably kept in sync with the newest -major (and only major, which is enough) schema version, since these -tests are not meant to double as schema upgrade ones (except some cases +The scheduler/xml inputs should be kept in sync with the latest major schema +version, since these tests are not meant to test schema upgrades (unless expressly designated as such). 
-Currently and unless something goes wrong, the procedure of upgrading -these tests en masse is as easy as: +To upgrade the inputs to a new major schema version: - cd "$(git rev-parse --show-toplevel)/cts" # if not already - pushd "$(git rev-parse --show-toplevel)/xml" + cd "$(git rev-parse --show-toplevel)/xml" ./regression.sh cts_scheduler -G - popd + cd "$(git rev-parse --show-toplevel)/cts" git add --interactive . - git commit -m 'XML: upgrade-M.N.xsl: apply on scheduler CTS test cases' - git reset HEAD && git checkout . # if some differences still remain - ./cts-scheduler # absolutely vital to check nothing got broken! - -Now, sadly, there's no proved automated way to minimize instances like this: - - - - -that may be left behind into more canonical: - - - -so manual editing is tasked, or perhaps `--format` or `--c14n` -to `xmllint` will be of help (without any other side effects). + git commit -m 'Test: scheduler: upgrade test inputs to schema $X.$Y' + ./cts-scheduler || echo 'Investigate what went wrong' -If the overall process gets stuck anywhere, common sense to the rescue. -The initial part of the above recipe can be repeated anytime to verify -there's nothing to upgrade artificially like this, which is a desired -state. Note that `regression.sh` script performs validation of both -the input and output, should the upgrade take place, implicitly, so -there's no need of revalidation in the happy case. +The first two commands can be run anytime to verify no further upgrades are +needed. diff --git a/cts/benchmark/Makefile.am b/cts/benchmark/Makefile.am index 532abd2..703f18d 100644 --- a/cts/benchmark/Makefile.am +++ b/cts/benchmark/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2001-2017 the Pacemaker project contributors +# Copyright 2001-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -9,5 +9,6 @@ MAINTAINERCLEANFILES = Makefile.in benchdir = $(datadir)/$(PACKAGE)/tests/cts/benchmark -dist_bench_DATA = README.benchmark control +dist_bench_DATA = README.benchmark \ + control bench_SCRIPTS = clubench diff --git a/cts/benchmark/clubench.in b/cts/benchmark/clubench.in index e65b60d..d20e292 100644 --- a/cts/benchmark/clubench.in +++ b/cts/benchmark/clubench.in @@ -126,7 +126,7 @@ mkreports() { runcts() { RC_ODIR="$1" msg "Running CTS" - python "$CTSDIR/CTSlab.py" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 & + python "$CTSDIR/cts-lab" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 & ctspid=$! tail -f "$RC_ODIR/ctsrun.out" & tailpid=$! 
diff --git a/cts/cli/crm_verify_invalid_bz.xml b/cts/cli/crm_verify_invalid_bz.xml new file mode 100644 index 0000000..b92e563 --- /dev/null +++ b/cts/cli/crm_verify_invalid_bz.xml @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/cts/cli/crm_verify_invalid_no_stonith.xml b/cts/cli/crm_verify_invalid_no_stonith.xml new file mode 100644 index 0000000..ce1b3a5 --- /dev/null +++ b/cts/cli/crm_verify_invalid_no_stonith.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp index 66bd7b3..b34fba8 100644 --- a/cts/cli/regression.daemons.exp +++ b/cts/cli/regression.daemons.exp @@ -122,6 +122,11 @@ Do not lock resources to a cleanly shut down node longer than this + + Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + How long to wait for a node that has joined the cluster to join the controller process group + + =#=#=#= End test: Get controller metadata - OK (0) =#=#=#= @@ -349,6 +354,11 @@ Apply fencing delay targeting the lost nodes with the highest total resource priority + + Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + How long to wait for a node that has joined the cluster to join the controller process group + + The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. 
Maximum time for node-to-node communication diff --git a/cts/cli/regression.error_codes.exp b/cts/cli/regression.error_codes.exp index 6c6f4e1..7d705e2 100644 --- a/cts/cli/regression.error_codes.exp +++ b/cts/cli/regression.error_codes.exp @@ -145,6 +145,9 @@ pcmk_rc_node_unknown - Node not found =#=#=#= End test: Get negative Pacemaker return code (with name) (XML) - OK (0) =#=#=#= * Passed: crm_error - Get negative Pacemaker return code (with name) (XML) =#=#=#= Begin test: List Pacemaker return codes (non-positive) =#=#=#= +-1039: Compression/decompression error +-1038: Nameserver resolution error +-1037: No active transaction found -1036: Bad XML patch format -1035: Bad input value provided -1034: Disabled @@ -186,6 +189,9 @@ pcmk_rc_node_unknown - Node not found * Passed: crm_error - List Pacemaker return codes (non-positive) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (XML) =#=#=#= + + + @@ -227,6 +233,9 @@ pcmk_rc_node_unknown - Node not found =#=#=#= End test: List Pacemaker return codes (non-positive) (XML) - OK (0) =#=#=#= * Passed: crm_error - List Pacemaker return codes (non-positive) (XML) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) =#=#=#= +-1039: pcmk_rc_compression Compression/decompression error +-1038: pcmk_rc_ns_resolution Nameserver resolution error +-1037: pcmk_rc_no_transaction No active transaction found -1036: pcmk_rc_bad_xml_patch Bad XML patch format -1035: pcmk_rc_bad_input Bad input value provided -1034: pcmk_rc_disabled Disabled @@ -268,6 +277,9 @@ pcmk_rc_node_unknown - Node not found * Passed: crm_error - List Pacemaker return codes (non-positive) (with names) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) (XML) =#=#=#= + + + diff --git a/cts/cli/regression.rules.exp b/cts/cli/regression.rules.exp index c3dccd7..cdfb5d1 100644 --- a/cts/cli/regression.rules.exp +++ b/cts/cli/regression.rules.exp @@ -33,6 +33,9 @@ crm_rule: --check requires use of --rule= log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ +log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found +log_xmllib_err error: XML Error: invalidxml +log_xmllib_err error: XML Error: ^ crm_rule: Couldn't parse input string: invalidxml =#=#=#= End test: crm_rule given invalid input XML - Invalid data given (65) =#=#=#= @@ -41,6 +44,9 @@ crm_rule: Couldn't parse input string: invalidxml log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ +log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found +log_xmllib_err error: XML Error: invalidxml +log_xmllib_err error: XML Error: ^ @@ -55,6 +61,9 @@ log_xmllib_err error: XML Error: ^ log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ +log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found +log_xmllib_err error: XML Error: invalidxml +log_xmllib_err error: XML Error: ^ crm_rule: Couldn't parse input from STDIN =#=#=#= End test: crm_rule given invalid input XML on stdin - Invalid data given (65) =#=#=#= @@ -63,6 +72,9 @@ crm_rule: Couldn't parse input from STDIN log_xmllib_err error: XML Error: 
Entity: line 1: parser error : Start tag expected, '<' not found log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ +log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found +log_xmllib_err error: XML Error: invalidxml +log_xmllib_err error: XML Error: ^ diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index a8e2236..accf781 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -4706,7 +4706,7 @@ Resources prim2 is colocated with: - + @@ -4716,10 +4716,10 @@ Resources prim2 is colocated with: - - + + - + @@ -4752,9 +4752,9 @@ Resources prim3 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#= - + - + @@ -4764,11 +4764,11 @@ Resources prim3 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#= - + - + - + @@ -4802,9 +4802,9 @@ Resources prim4 is colocated with: - - - + + + @@ -4814,11 +4814,11 @@ Resources prim4 is colocated with: - - - + + + - + @@ -4848,7 +4848,7 @@ Resources colocated with prim5: =#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= - + @@ -4858,11 +4858,11 @@ Resources colocated with prim5: =#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#= - + - - - + + + @@ -4910,7 +4910,7 @@ Resources prim7 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= - + @@ -4919,7 +4919,7 @@ Resources prim7 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= - + @@ -4938,7 +4938,7 @@ Resources prim8 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= - + @@ -4947,7 +4947,7 @@ Resources prim8 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= - + @@ -4966,7 +4966,7 @@ Resources prim9 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= - + @@ -4975,7 +4975,7 @@ Resources prim9 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= - + @@ -5000,7 +5000,7 @@ Resources prim10 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= - + @@ -5010,9 +5010,9 @@ Resources prim10 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= - + - + @@ -5043,8 +5043,8 @@ Resources prim11 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= - - + + @@ -5053,12 +5053,12 @@ Resources prim11 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#= - - - - - - + + + + + + @@ -5089,8 +5089,8 @@ Resources prim12 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= - - + + @@ -5099,12 +5099,12 @@ Resources prim12 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#= - - - - - - + + + + + + @@ -5135,8 +5135,8 @@ Resources prim13 is colocated with: =#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= - - + + @@ -5145,12 +5145,12 @@ Resources prim13 is colocated with: =#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#= - - - - - - + + + + + + @@ -5169,7 +5169,7 @@ Resources colocated with group: =#=#=#= Begin 
test: Check locations and constraints for group in XML =#=#=#= - + @@ -5178,7 +5178,7 @@ Resources colocated with group: =#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#= - + @@ -5197,7 +5197,7 @@ Resources colocated with clone: =#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= - + @@ -5206,7 +5206,7 @@ Resources colocated with clone: =#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#= - + @@ -5529,34 +5529,34 @@ export overcloud-rabbit-2=overcloud-rabbit-2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -5711,26 +5711,26 @@ Original: cluster02 capacity: Original: httpd-bundle-0 capacity: Original: httpd-bundle-1 capacity: Original: httpd-bundle-2 capacity: -pcmk__finalize_assignment: ping:0 utilization on cluster02: -pcmk__finalize_assignment: ping:1 utilization on cluster01: -pcmk__finalize_assignment: Fencing utilization on cluster01: -pcmk__finalize_assignment: dummy utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01: -pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01: -pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01: -pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0: -pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02: -pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02: -pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1: -pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01: -pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2: -pcmk__finalize_assignment: Public-IP utilization on cluster02: -pcmk__finalize_assignment: Email utilization on cluster02: -pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02: -pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01: -pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02: -pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01: +pcmk__assign_resource: ping:0 utilization on cluster02: +pcmk__assign_resource: ping:1 utilization on cluster01: +pcmk__assign_resource: Fencing utilization on cluster01: +pcmk__assign_resource: dummy utilization on cluster02: +pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01: +pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02: +pcmk__assign_resource: httpd-bundle-ip-192.168.122.131 utilization on cluster01: +pcmk__assign_resource: httpd-bundle-0 utilization on cluster01: +pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0: +pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02: +pcmk__assign_resource: httpd-bundle-1 utilization on cluster02: +pcmk__assign_resource: httpd:1 utilization on httpd-bundle-1: +pcmk__assign_resource: httpd-bundle-2 utilization on cluster01: +pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2: +pcmk__assign_resource: Public-IP utilization on cluster02: +pcmk__assign_resource: Email utilization on cluster02: +pcmk__assign_resource: mysql-proxy:0 utilization on cluster02: +pcmk__assign_resource: mysql-proxy:1 utilization on cluster01: +pcmk__assign_resource: promotable-rsc:0 utilization on cluster02: +pcmk__assign_resource: promotable-rsc:1 utilization on cluster01: Remaining: cluster01 capacity: Remaining: 
cluster02 capacity: Remaining: httpd-bundle-0 capacity: @@ -5961,7 +5961,7 @@ Transition Summary: * Move Public-IP ( cluster02 -> cluster01 ) * Move Email ( cluster02 -> cluster01 ) * Stop mysql-proxy:0 ( cluster02 ) due to node availability - * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability + * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability Executing Cluster Transition: * Pseudo action: httpd-bundle-1_stop_0 @@ -7048,7 +7048,7 @@ Diff: +++ 1.4.1 (null) - + @@ -7667,7 +7667,7 @@ Diff: +++ 0.1.0 (null) -- /cib/status/node_state[@id='1'] -- /cib/status/node_state[@id='httpd-bundle-0'] -- /cib/status/node_state[@id='httpd-bundle-1'] -+ /cib: @crm_feature_set=3.17.4, @num_updates=0, @admin_epoch=0 ++ /cib: @crm_feature_set=3.19.0, @num_updates=0, @admin_epoch=0 -- /cib: @cib-last-written, @update-origin, @update-client, @update-user, @have-quorum, @dc-uuid =#=#=#= End test: Get active shadow instance's diff (empty CIB) - Error occurred (1) =#=#=#= * Passed: crm_shadow - Get active shadow instance's diff (empty CIB) @@ -7679,29 +7679,29 @@ Diff: +++ 0.1.0 (null) - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - - + + + + - + @@ -7898,3 +7898,95 @@ crm_shadow: Could not access shadow instance 'cts-cli': No such file or director =#=#=#= End test: Switch to nonexistent shadow instance (force) (XML) - No such object (105) =#=#=#= * Passed: crm_shadow - Switch to nonexistent shadow instance (force) (XML) +=#=#=#= Begin test: Verify a file-specified invalid configuration, outputting as xml =#=#=#= + + + + Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource + Ignoring <clone> resource 'test2-clone' because configuration is invalid + crm_verify: Errors found during check: config not valid + + + +=#=#=#= End test: Verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration, outputting as xml +=#=#=#= Begin test: Verify another file-specified invalid configuration, outputting as xml =#=#=#= + + + + Resource start-up disabled since no STONITH resources have been defined + Either configure some or disable STONITH with the stonith-enabled option + NOTE: Clusters with shared data need STONITH to ensure data integrity + Node pcmk-1 is unclean but cannot be fenced + Node pcmk-2 is unclean but cannot be fenced + crm_verify: Errors found during check: config not valid + + + +=#=#=#= End test: Verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify another file-specified invalid configuration, outputting as xml +=#=#=#= Begin test: Verbosely verify a file-specified invalid configuration, outputting as xml =#=#=#= +unpack_config warning: Blind faith: not fencing unseen nodes + + + + Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource + Ignoring <clone> resource 'test2-clone' because configuration is invalid + crm_verify: Errors found during check: config not valid + + + +=#=#=#= End test: Verbosely verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verbosely verify a file-specified invalid configuration, outputting as xml +=#=#=#= Begin test: Verbosely verify another file-specified invalid configuration, outputting as xml =#=#=#= +(cluster_status@status.c:113) warning: Fencing and resource management 
disabled due to lack of quorum + + + + Resource start-up disabled since no STONITH resources have been defined + Either configure some or disable STONITH with the stonith-enabled option + NOTE: Clusters with shared data need STONITH to ensure data integrity + Node pcmk-1 is unclean but cannot be fenced + Node pcmk-2 is unclean but cannot be fenced + crm_verify: Errors found during check: config not valid + + + +=#=#=#= End test: Verbosely verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verbosely verify another file-specified invalid configuration, outputting as xml +=#=#=#= Begin test: Verify a file-specified valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: crm_verify - Verify a file-specified valid configuration, outputting as xml +=#=#=#= Begin test: Verify a piped-in valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: cat - Verify a piped-in valid configuration, outputting as xml +=#=#=#= Begin test: Verbosely verify a file-specified valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verbosely verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: crm_verify - Verbosely verify a file-specified valid configuration, outputting as xml +=#=#=#= Begin test: Verbosely verify a piped-in valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verbosely verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: cat - Verbosely verify a piped-in valid configuration, outputting as xml +=#=#=#= Begin test: Verify a string-supplied valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: crm_verify - Verify a string-supplied valid configuration, outputting as xml +=#=#=#= Begin test: Verbosely verify a string-supplied valid configuration, outputting as xml =#=#=#= + + + +=#=#=#= End test: Verbosely verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#= +* Passed: crm_verify - Verbosely verify a string-supplied valid configuration, outputting as xml diff --git a/cts/cluster_test.in b/cts/cluster_test.in new file mode 100755 index 0000000..f5cb3e8 --- /dev/null +++ b/cts/cluster_test.in @@ -0,0 +1,175 @@ +#!@BASH_PATH@ +# +# Copyright 2008-2020 the Pacemaker project contributors +# +# The version control history for this file may have further details. +# +# This source code is licensed under the GNU General Public License version 2 +# or later (GPLv2+) WITHOUT ANY WARRANTY. +# +if [ -e ~/.cts ]; then + . ~/.cts +fi +anyAsked=0 + +[ $# -lt 1 ] || CTS_numtests=$1 + +die() { echo "$@"; exit 1; } + +if [ -z "$CTS_asked_once" ]; then + anyAsked=1 + echo "This script should only be executed on the test exerciser." + echo "The test exerciser will remotely execute the actions required by the" + echo "tests and should not be part of the cluster itself." + + read -p "Is this host intended to be the test exerciser? (yN) " doUnderstand + [ "$doUnderstand" = "y" ] \ + || die "This script must be executed on the test exerciser" +fi + +if [ -z "$CTS_node_list" ]; then + anyAsked=1 + read -p "Please list your cluster nodes (eg. 
node1 node2 node3): " CTS_node_list +else + echo "Beginning test of cluster: $CTS_node_list" +fi + +if [ -z "$CTS_stack" ]; then + anyAsked=1 + read -p "Which cluster stack are you using? ([corosync]): " CTS_stack + [ -n "$CTS_stack" ] || CTS_stack=corosync +else + echo "Using the $CTS_stack cluster stack" +fi + +[ "${CTS_node_list}" = "${CTS_node_list/$HOSTNAME/}" ] \ + || die "This script must be executed on the test exerciser, and the test exerciser cannot be part of the cluster" + +printf "+ Bootstrapping ssh... " +if [ -z "$SSH_AUTH_SOCK" ]; then + printf "\n + Initializing SSH " + eval "$(ssh-agent)" + echo " + Adding identities..." + ssh-add + rc=$? + if [ $rc -ne 0 ]; then + echo " -- No identities added" + printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" + + read -p " - Do you want this program to help you create one? (yN) " auto_fix + if [ "$auto_fix" = "y" ]; then + ssh-keygen -t dsa + ssh-add + else + die "Please run 'ssh-keygen -t dsa' to create a new key" + fi + fi +else + echo "OK" +fi + +test_ok=1 +printf "+ Testing ssh configuration... " +for n in $CTS_node_list; do + ssh -l root -o PasswordAuthentication=no -o ConnectTimeout=5 "$n" /bin/true + rc=$? + if [ $rc -ne 0 ]; then + echo " - connection to $n failed" + test_ok=0 + fi +done + +if [ $test_ok -eq 0 ]; then + printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" + + read -p " - Do you want this program to help you with such a setup? (yN) " auto_fix + if [ "$auto_fix" = "y" ]; then + # XXX are we picking the most suitable identity? + privKey=$(ssh-add -L | head -n1 | cut -d" " -f3) + sshCopyIdOpts="-o User=root" + [ -z "$privKey" ] || sshCopyIdOpts+=" -i \"${privKey}.pub\"" + for n in $CTS_node_list; do + eval "ssh-copy-id $sshCopyIdOpts \"${n}\"" \ + || die "Attempt to 'ssh-copy-id $sshCopyIdOpts \"$n\"' failed" + done + else + die "Please install one of your SSH public keys to root's account on all cluster nodes" + fi +fi +echo "OK" + +if [ -z "$CTS_logfile" ]; then + anyAsked=1 + read -p " + Where does/should syslog store logs from remote hosts? (/var/log/messages) " CTS_logfile + [ -n "$CTS_logfile" ] || CTS_logfile=/var/log/messages +fi + +[ -e "$CTS_logfile" ] || die "$CTS_logfile doesn't exist" + +if [ -z "$CTS_logfacility" ]; then + anyAsked=1 + read -p " + Which log facility does the cluster use? (daemon) " CTS_logfacility + [ -n "$CTS_logfacility" ] || CTS_logfacility=daemon +fi + +if [ -z "$CTS_boot" ]; then + read -p "+ Is the cluster software started automatically when a node boots? [yN] " CTS_boot + if [ -z "$CTS_boot" ]; then + CTS_boot=0 + else + case $CTS_boot in + 1|y|Y) CTS_boot=1;; + *) CTS_boot=0;; + esac + fi +fi + +if [ -z "$CTS_numtests" ]; then + read -p "+ How many test iterations should be performed? (500) " CTS_numtests + [ -n "$CTS_numtests" ] || CTS_numtests=500 +fi + +if [ -z "$CTS_asked_once" ]; then + anyAsked=1 + read -p "+ What type of STONITH agent do you use? (none) " CTS_stonith + [ -z "$CTS_stonith" ] \ + || read -p "+ List any STONITH agent parameters (eq. device_host=switch.power.com): " CTS_stonith_args + [ -n "$CTS_adv" ] \ + || read -p "+ (Advanced) Any extra CTS parameters? (none) " CTS_adv +fi + +[ $anyAsked -eq 0 ] \ + || read -p "+ Save values to ~/.cts for next time? 
(yN) " doSave + +if [ "$doSave" = "y" ]; then + cat > ~/.cts <<-EOF + # CTS Test data + CTS_stack="$CTS_stack" + CTS_node_list="$CTS_node_list" + CTS_logfile="$CTS_logfile" + CTS_logport="$CTS_logport" + CTS_logfacility="$CTS_logfacility" + CTS_asked_once=1 + CTS_adv="$CTS_adv" + CTS_stonith="$CTS_stonith" + CTS_stonith_args="$CTS_stonith_args" + CTS_boot="$CTS_boot" +EOF +fi + +cts_extra="" +if [ -n "$CTS_stonith" ]; then + cts_extra="$cts_extra --stonith-type $CTS_stonith" + [ -z "$CTS_stonith_args" ] \ + || cts_extra="$cts_extra --stonith-params \"$CTS_stonith_args\"" +else + cts_extra="$cts_extra --stonith 0" + echo " - Testing a cluster without STONITH is like a blunt pencil... pointless" +fi + +printf "\nAll set to go for %d iterations!\n" "$CTS_numtests" +[ $anyAsked -ne 0 ] \ + || echo "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually)." + +echo Now paste the following command into this shell: +echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\"" diff --git a/cts/cts-attrd.in b/cts/cts-attrd.in index b7ad538..b594ac3 100644 --- a/cts/cts-attrd.in +++ b/cts/cts-attrd.in @@ -126,7 +126,15 @@ class AttributeTests(Tests): test.add_cmd("attrd_updater", "--name AAA -B 111 -d 5 --output-as=xml") test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", "name=\"AAA\" value=\"111\"") - test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111 | from .* with 5s write delay", + test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111 \| from .* with 5s write delay", + regex=True) + + test = self.new_test("set_attr_4", + "Update an attribute that does not exist with a delay") + test.add_cmd("attrd_updater", "--name BBB -U 999 -d 10 --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name BBB -Q --output-as=xml", + "name=\"BBB\" value=\"999\"") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: \(unset\) -> 999 \| from .* with 10s write delay", regex=True) test = self.new_test("update_attr_1", @@ -140,6 +148,13 @@ class AttributeTests(Tests): test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 222 -> 333", regex=True) + test = self.new_test("update_attr_2", + "Update an attribute using a delay other than its default") + test.add_cmd("attrd_updater", "--name BBB -U 777 -d 10 --output-as=xml") + test.add_cmd("attrd_updater", "--name BBB -U 888 -d 7 --output-as=xml") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 777 -> 888 \| from .* with 10s write delay", + regex=True) + test = self.new_test("update_attr_delay_1", "Update the delay of an attribute that already exists") test.add_cmd("attrd_updater", "--name BBB -U 222 --output-as=xml") diff --git a/cts/cts-cli.in b/cts/cts-cli.in index fdad002..f4cb7c3 100755 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,6 +1,6 @@ #!@BASH_PATH@ # -# Copyright 2008-2022 the Pacemaker project contributors +# Copyright 2008-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -143,7 +143,7 @@ function _test_assert() { target=$1; shift validate=$1; shift cib=$1; shift - app=`echo "$cmd" | sed 's/\ .*//'` + app=$(echo "$cmd" | head -n 1 | sed 's/\ .*//') printf "* Running: $app - $desc\n" 1>&2 printf "=#=#=#= Begin test: $desc =#=#=#=\n" @@ -2289,6 +2289,53 @@ function test_tools() { desc="Switch to nonexistent shadow instance (force) (XML)" cmd="crm_shadow --switch $shadow --batch --force --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 + + CIB_file_invalid_1="$test_home/cli/crm_verify_invalid_bz.xml" + CIB_file_invalid_2="$test_home/cli/crm_verify_invalid_no_stonith.xml" + + desc="Verify a file-specified invalid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml" + test_assert_validate $CRM_EX_CONFIG 0 + + desc="Verify another file-specified invalid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml" + test_assert_validate $CRM_EX_CONFIG 0 + + desc="Verbosely verify a file-specified invalid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml --verbose" + test_assert_validate $CRM_EX_CONFIG 0 + + desc="Verbosely verify another file-specified invalid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml --verbose" + test_assert_validate $CRM_EX_CONFIG 0 + + export CIB_file="$test_home/cli/crm_mon.xml" + + desc="Verify a file-specified valid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file' --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Verify a piped-in valid configuration, outputting as xml" + cmd="cat '$CIB_file' | crm_verify -p --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Verbosely verify a file-specified valid configuration, outputting as xml" + cmd="crm_verify --xml-file '$CIB_file' --output-as=xml --verbose" + test_assert_validate $CRM_EX_OK 0 + + desc="Verbosely verify a piped-in valid configuration, outputting as xml" + cmd="cat '$CIB_file' | crm_verify -p --output-as=xml --verbose" + test_assert_validate $CRM_EX_OK 0 + + CIB_file_contents=$(cat "$CIB_file") + + desc="Verify a string-supplied valid configuration, outputting as xml" + cmd="crm_verify -X '$CIB_file_contents' --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Verbosely verify a string-supplied valid configuration, outputting as xml" + cmd="crm_verify -X '$CIB_file_contents' --output-as=xml --verbose" + test_assert_validate $CRM_EX_OK 0 unset CIB_file unset CIB_shadow @@ -3382,6 +3429,11 @@ function print_or_remove_file() { rm -f "$TMPFILE" else echo " $TMPFILE" + if [ $verbose -ne 0 ]; then + echo "======================================================" + cat "$TMPFILE" + echo "======================================================" + fi fi } diff --git a/cts/cts-lab.in b/cts/cts-lab.in new file mode 100644 index 0000000..01bf9aa --- /dev/null +++ b/cts/cts-lab.in @@ -0,0 +1,136 @@ +#!@PYTHON@ +""" Command-line interface to Pacemaker's Cluster Test Suite (CTS) +""" + +# pylint doesn't like the module name "cts-lab" which is an invalid complaint for this file +# This also disables various other invalid names - it thinks scenario and match are constants +# that should have all caps names, and that cm and n are too short. 
+# pylint: disable=invalid-name + +__copyright__ = "Copyright 2001-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import signal +import sys + +from pacemaker._cts.CTS import CtsLab +from pacemaker._cts.cmcorosync import Corosync2 +from pacemaker._cts.audits import audit_list +from pacemaker._cts.logging import LogFactory +from pacemaker._cts.scenarios import AllOnce, Boot, BootCluster, LeaveBooted, RandomTests, Sequence +from pacemaker._cts.tests import test_list + +# These are globals so they can be used by the signal handler. +scenario = None +LogFactory().add_stderr() + + +def sig_handler(signum, _frame): + """ Handle the given signal number """ + + LogFactory().log("Interrupted by signal %d" % signum) + + if scenario: + scenario.summarize() + + if signum == 15: + if scenario: + scenario.teardown() + + sys.exit(1) + + +def plural_s(n): + """ Return a string suffix depending on whether or not n is > 1 """ + + if n == 1: + return "" + + return "S" + + +if __name__ == '__main__': + environment = CtsLab(sys.argv[1:]) + iters = environment["iterations"] + tests = [] + + # Set the signal handler + signal.signal(15, sig_handler) + signal.signal(10, sig_handler) + + # Create the Cluster Manager object + cm = None + + if environment["Stack"] == "corosync 2+": + cm = Corosync2() + else: + LogFactory().log("Unknown stack: %s" % environment["stack"]) + sys.exit(1) + + if environment["TruncateLog"]: + if environment["OutputFile"] is None: + LogFactory().log("Ignoring truncate request because no output file specified") + else: + LogFactory().log("Truncating %s" % environment["OutputFile"]) + + with open(environment["OutputFile"], "w", encoding="utf-8") as outputfile: + outputfile.truncate(0) + + audits = audit_list(cm) + + if environment["Listtests"]: + tests = test_list(cm, audits) + LogFactory().log("Total %d tests" % len(tests)) + + for test in tests: + LogFactory().log(test.name) + + sys.exit(0) + + elif len(environment["tests"]) == 0: + tests = test_list(cm, audits) + + else: + chosen = environment["tests"] + for test_case in chosen: + match = None + + for test in test_list(cm, audits): + if test.name == test_case: + match = test + + if not match: + LogFactory().log("--choose: No applicable/valid tests chosen") + sys.exit(1) + else: + tests.append(match) + + # Scenario selection + if environment["scenario"] == "all-once": + iters = len(tests) + scenario = AllOnce(cm, [ BootCluster(cm, environment) ], audits, tests) + elif environment["scenario"] == "sequence": + scenario = Sequence(cm, [ BootCluster(cm, environment) ], audits, tests) + elif environment["scenario"] == "boot": + scenario = Boot(cm, [ LeaveBooted(cm, environment)], audits, []) + else: + scenario = RandomTests(cm, [ BootCluster(cm, environment) ], audits, tests) + + LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING %r TEST%s" % (iters, plural_s(iters))) + LogFactory().log("Stack: %s (%s)" % (environment["Stack"], environment["Name"])) + LogFactory().log("Schema: %s" % environment["Schema"]) + LogFactory().log("Scenario: %s" % scenario.__doc__) + LogFactory().log("CTS Exerciser: %s" % environment["cts-exerciser"]) + LogFactory().log("CTS Logfile: %s" % environment["OutputFile"]) + LogFactory().log("Random Seed: %s" % environment["RandSeed"]) + LogFactory().log("Syslog variant: %s" % environment["syslogd"].strip()) + LogFactory().log("System log files: %s" % environment["LogFileName"]) + + if "IPBase" in environment: + LogFactory().log("Base IP for 
resources: %s" % environment["IPBase"]) + + LogFactory().log("Cluster starts at boot: %d" % environment["at-boot"]) + + environment.dump() + rc = environment.run(scenario, iters) + sys.exit(rc) diff --git a/cts/cts-log-watcher.in b/cts/cts-log-watcher.in new file mode 100644 index 0000000..cee9c94 --- /dev/null +++ b/cts/cts-log-watcher.in @@ -0,0 +1,84 @@ +#!@PYTHON@ +""" Remote log reader for Pacemaker's Cluster Test Suite (CTS) + +Reads a specified number of lines from the supplied offset +Returns the current offset +Contains logic for handling truncation +""" + +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import sys +import os +import fcntl + +if __name__ == '__main__': + + limit = 0 + offset = 0 + prefix = '' + filename = '/var/log/messages' + + skipthis=None + args=sys.argv[1:] + for i in range(0, len(args)): + if skipthis: + skipthis=None + continue + + elif args[i] == '-l' or args[i] == '--limit': + skipthis=1 + limit = int(args[i+1]) + + elif args[i] == '-f' or args[i] == '--filename': + skipthis=1 + filename = args[i+1] + + elif args[i] == '-o' or args[i] == '--offset': + skipthis=1 + offset = args[i+1] + + elif args[i] == '-p' or args[i] == '--prefix': + skipthis=1 + prefix = args[i+1] + + elif args[i] == '-t' or args[i] == '--tag': + skipthis=1 + + if not os.access(filename, os.R_OK): + print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)) + sys.exit(1) + + logfile=open(filename, 'r') + logfile.seek(0, os.SEEK_END) + newsize=logfile.tell() + + if offset != 'EOF': + offset = int(offset) + if newsize >= offset: + logfile.seek(offset) + else: + print(prefix + ('File truncated from %d to %d' % (offset, newsize))) + if (newsize*1.05) < offset: + logfile.seek(0) + # else: we probably just lost a few logs after a fencing op + # continue from the new end + # TODO: accept a timestamp and discard all messages older than it + + # Don't block when we reach EOF + fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) + + count = 0 + while True: + if logfile.tell() >= newsize: break + elif limit and count >= limit: break + + line = logfile.readline() + if not line: break + + print(line.strip()) + count += 1 + + print(prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)) + logfile.close() diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index ee0cb7b..50c32f6 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -80,6 +80,10 @@ TESTS = [ [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], [ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ], + [ + "coloc-with-inner-group-member", + "Consider explicit colocations with inner group members" + ], ], [ [ "rsc_dep1", "Must not" ], @@ -205,6 +209,7 @@ TESTS = [ [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], + [ "timeout-by-node", "Start timeout varies by node" ], ], [ [ "order1", "Order start 1" ], @@ -253,6 +258,18 @@ TESTS = [ [ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ], [ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ], [ "group-anticolocation", "Group with failed 
last member anti-colocated with another group" ], + [ "group-anticolocation-2", + "Group with failed last member anti-colocated with another sticky group" + ], + [ "group-anticolocation-3", + "Group with failed last member mandatorily anti-colocated with another group" + ], + [ "group-anticolocation-4", + "Group with failed last member anti-colocated without influence with another group" + ], + [ "group-anticolocation-5", + "Group with failed last member anti-colocated with another group (third node allowed)" + ], [ "group-colocation-failure", "Group with sole member failed, colocated with another group" ], @@ -441,6 +458,39 @@ TESTS = [ [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], + [ "clone-recover-no-shuffle-1", + "Don't shuffle instances when starting a new primitive instance" ], + [ "clone-recover-no-shuffle-2", + "Don't shuffle instances when starting a new group instance" ], + [ "clone-recover-no-shuffle-3", + "Don't shuffle instances when starting a new bundle instance" ], + [ "clone-recover-no-shuffle-4", + "Don't shuffle instances when starting a new primitive instance with " + "location preference "], + [ "clone-recover-no-shuffle-5", + "Don't shuffle instances when starting a new group instance with " + "location preference" ], + [ "clone-recover-no-shuffle-6", + "Don't shuffle instances when starting a new bundle instance with " + "location preference" ], + [ "clone-recover-no-shuffle-7", + "Don't shuffle instances when starting a new primitive instance that " + "will be promoted" ], + [ "clone-recover-no-shuffle-8", + "Don't shuffle instances when starting a new group instance that " + "will be promoted " ], + [ "clone-recover-no-shuffle-9", + "Don't shuffle instances when starting a new bundle instance that " + "will be promoted " ], + [ "clone-recover-no-shuffle-10", + "Don't shuffle instances when starting a new primitive instance that " + "won't be promoted" ], + [ "clone-recover-no-shuffle-11", + "Don't shuffle instances when starting a new group instance that " + "won't be promoted " ], + [ "clone-recover-no-shuffle-12", + "Don't shuffle instances when starting a new bundle instance that " + "won't be promoted " ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], @@ -715,6 +765,8 @@ TESTS = [ "cl#5301 - respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], + [ "pending-node-no-uname", "Do not fence a pending node that doesn't have an uname in node state yet" ], + [ "node-pending-timeout", "Fence a pending node that has reached `node-pending-timeout`" ], ], [ [ "systemhealth1", "System Health () #1" ], @@ -990,6 +1042,42 @@ TESTS = [ [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "bundle-connection-with-container", "Don't move a container due to connection preferences" ], [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ], + [ "bundle-promoted-location-1", + "Promotable bundle, positive location" ], + [ "bundle-promoted-location-2", + "Promotable bundle, negative location" ], + [ 
"bundle-promoted-location-3", + "Promotable bundle, positive location for promoted role" ], + [ "bundle-promoted-location-4", + "Promotable bundle, negative location for promoted role" ], + [ "bundle-promoted-location-5", + "Promotable bundle, positive location for unpromoted role" ], + [ "bundle-promoted-location-6", + "Promotable bundle, negative location for unpromoted role" ], + [ "bundle-promoted-colocation-1", + "Primary promoted bundle, dependent primitive (mandatory coloc)" ], + [ "bundle-promoted-colocation-2", + "Primary promoted bundle, dependent primitive (optional coloc)" ], + [ "bundle-promoted-colocation-3", + "Dependent promoted bundle, primary primitive (mandatory coloc)" ], + [ "bundle-promoted-colocation-4", + "Dependent promoted bundle, primary primitive (optional coloc)" ], + [ "bundle-promoted-colocation-5", + "Primary and dependent promoted bundle instances (mandatory coloc)" ], + [ "bundle-promoted-colocation-6", + "Primary and dependent promoted bundle instances (optional coloc)" ], + [ "bundle-promoted-anticolocation-1", + "Primary promoted bundle, dependent primitive (mandatory anti)" ], + [ "bundle-promoted-anticolocation-2", + "Primary promoted bundle, dependent primitive (optional anti)" ], + [ "bundle-promoted-anticolocation-3", + "Dependent promoted bundle, primary primitive (mandatory anti)" ], + [ "bundle-promoted-anticolocation-4", + "Dependent promoted bundle, primary primitive (optional anti)" ], + [ "bundle-promoted-anticolocation-5", + "Primary and dependent promoted bundle instances (mandatory anti)" ], + [ "bundle-promoted-anticolocation-6", + "Primary and dependent promoted bundle instances (optional anti)" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], diff --git a/cts/cts.in b/cts/cts.in new file mode 100755 index 0000000..24339aa --- /dev/null +++ b/cts/cts.in @@ -0,0 +1,404 @@ +#!@BASH_PATH@ +# +# Copyright 2012-2023 the Pacemaker project contributors +# +# The version control history for this file may have further details. +# +# This source code is licensed under the GNU General Public License version 2 +# or later (GPLv2+) WITHOUT ANY WARRANTY. +# + +# e.g. /etc/sysconfig or /etc/default +CONFIG_DIR=@CONFIGDIR@ + +cts_root=`dirname $0` + +logfile=0 +summary=0 +verbose=0 +watch=0 +saved=0 +tests="" + +install=0 +clean=0 +kill=0 +run=0 +boot=0 +target=rhel-7 +cmd="" +trace="" + +custom_log="" +patterns="-e CTS:" + +function sed_in_place_remotely() { + cluster-helper -g $cluster_name -- cp -p "\"$1\"" "\"$1.sed\"" \&\& sed -e "\"$2\"" "\"$1\"" \> "\"$1.sed\"" \&\& mv "\"$1.sed\"" "\"$1\"" +} + + +helpmsg=$(cat </dev/null +if [ $? != 0 ]; then + echo $0 needs the cluster-helper script to be in your path + exit 1 +fi + +which cluster-clean &>/dev/null +if [ $? 
!= 0 ]; then + echo $0 needs the cluster-clean script to be in your path + exit 1 +fi + +if [ "x$cluster_name" = x ] || [ "x$cluster_name" = xpick ]; then + clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' ` + + echo "custom) interactively define a cluster" + for i in $clusters; do + echo "$i) `cluster-helper --list short -g $i`" + done + + read -p "Choose a cluster [custom]: " cluster_name + echo +fi + +if [ -z $cluster_name ]; then + cluster_name=custom +fi + + +case $cluster_name in + custom) + read -p "Cluster name: " cluster_name + read -p "Cluster hosts: " cluster_hosts + read -p "Cluster log file: " cluster_log + cluster-helper add -g "$cluster_name" -w "$cluster_hosts" + ;; + *) + cluster_hosts=`cluster-helper --list short -g $cluster_name` + cluster_log=~/cluster-$cluster_name.log + ;; +esac + +# NOTES ABOUT THESE AWESOME REGULAR EXPRESSIONS: +# +# * We can't assume GNU sed. Unfortunately, + and * are GNU extensions. Thus, +# we have to use {1,} for + and {0,} for *. +# * You don't need to add an extra set of escaped quotes around the sed expression +# arguments here - sed_in_place_remotely will do that for you. +# * Only literal quotes need the triple backslashes. All other special characters +# are fine with just a single one. +# * sed needs a LOT of characters escaped - \, {, }, (, ), and | at least. + +if [ x$cmd != x ]; then + config="${CONFIG_DIR}/pacemaker" + case $cmd in + trace-ls|tls) + cluster-helper -g $cluster_name -- grep "^[[:space:]]*PCMK_trace_functions" $config + ;; + trace-add|tadd) + echo "Adding $trace to PCMK_trace_functions" + # Note that this only works if there's already a PCMK_trace_functions line. + # If there isn't one, create it with trace-set first. + # + # Match optional whitespace; then PCMK_trace_functions; then an equals + # surrounded by optional whitespace; then an optional quote; then whatever + # else (presumably, this is the list of previously traced functions with + # an optional trailing quote). Replace the entire line with + # PCMK_trace_functions=, + sed_in_place_remotely "$config" "s/^[ \t]\{0,\}PCMK_trace_functions[ \t]\{0,\}=[ \t]\{0,\}\(\\\"\{0,1\}\)\(.\{1,\}\)/PCMK_trace_functions=\1$trace,\2/" + ;; + trace-rm|trm) + echo "Removing $trace from PCMK_trace_functions" + # A bunch of simple regexes are easier to follow than one giant one. + # Look for $trace in the following places on any line containing + # PCMK_trace_functions near the beginning: + # + # (1) At the start of a list - + # Match one of a leading quote, or an equals followed by optional + # whitespace; then $trace; then a comma. Replace $trace with whatever + # came before it. + # (2) In the middle of a list - + # Match a comma; then $trace; then a comma. Replace $trace with a + # single comma. + # (3) At the end of a list - + # Match a comma; then $trace; then one of a quote, whitespace, or + # the EOL. Replace $trace with whatever came after it. + # (4) All by itself - + # Match one of a leading quote, whitespace, or equals followed by + # optional whitespace; then $trace; then one of a trailing quote, + # whitespace, or the EOL. Replace $trace with whatever came before + # and after it. 
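+            # For example (function names and list contents are hypothetical),
+            # with trace=foo a line such as
+            #     PCMK_trace_functions="bar,foo,baz"
+            # is reduced by the middle-of-list case (2) to
+            #     PCMK_trace_functions="bar,baz"
+            # while a line containing only foo is handled by case (4) and
+            # becomes
+            #     PCMK_trace_functions=""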
+ sed_in_place_remotely "$config" "/^[ \t]\{0,\}PCMK_trace_functions/ { \ + s/\(\\\"\|=\|[ \t]\{1,\}\)$trace,/\1/ ; \ + s/,$trace,/,/ ; \ + s/,$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1/ ; \ + s/\(\\\"\|[ \t]\{1,\}\|=[ \t]\{0,\}\)$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1\2/ }" + ;; + trace-set|tset) + echo "Setting PCMK_trace_functions to '$trace'" + # Do this in two separate sed commands: + # + # (1) Unconditionally remove any existing PCMK_trace_functions= lines. + # (2) Add a new line with $trace after the example line, which therefore + # must exist. Note that GNU sed would support "a PCMK_trace_functions=$trace", + # but that's an extension. For all other seds, we have to put the + # command and the text on separate lines. + sed_in_place_remotely "$config" "/^[ \t]*PCMK_trace_functions/ d ; /^# Example: PCMK_trace_functions/ a\\\ +PCMK_trace_functions=\\\"$trace\\\"" + ;; + esac + exit 0 +fi + +if [ $run = 1 ]; then + install=1 + clean=1 +fi + +if [ $clean = 1 ]; then + rm -f $cluster_log + cluster-clean -g $cluster_name --kill +elif [ $kill = 1 ]; then + cluster-clean -g $cluster_name --kill-only + exit 0 +fi + +if [ $install = 1 ]; then + cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo +fi + +if [ $boot = 1 ]; then + $cts_root/cts-lab -r -c -g $cluster_name --boot + rc=$? + if [ $rc = 0 ]; then + echo "The cluster is ready..." + fi + exit $rc + +elif [ $run = 1 ]; then + $cts_root/cts-lab -r -c -g $cluster_name 500 "$@" + exit $? + +elif [ $clean = 1 ]; then + exit 0 +fi + +screen -ls | grep cts-$cluster_name &>/dev/null +active=$? + +if [ ! -z $custom_log ]; then + cluster_log=$custom_log +fi + +if [ "x$tests" != x ] && [ "x$tests" != "x " ]; then + for t in $tests; do + echo "crm_report --cts-log $cluster_log -d -T $t" + crm_report --cts-log $cluster_log -d -T $t + done + +elif [ $logfile = 1 ]; then + echo $cluster_log + +elif [ $summary = 1 ]; then + files=$cluster_log + if [ $saved = 1 ]; then + files=`ls -1tr ~/CTS-*/cluster-log.txt` + fi + for f in $files; do + echo $f + case $verbose in + 0) + cat -n $f | grep $patterns | grep -v "CTS: debug:" + ;; + 1) + cat -n $f | grep $patterns | grep -v "CTS:.* cmd:" + ;; + *) + cat -n $f | grep $patterns + ;; + esac + echo "" + done + +elif [ $watch = 1 ]; then + case $verbose in + 0) + tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:" + ;; + 1) + tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:" + ;; + *) + tail -F $cluster_log | grep $patterns + ;; + esac + +elif [ $active = 0 ]; then + screen -x cts-$cluster_name + +else + touch $cluster_log + export cluster_name cluster_hosts cluster_log + screen -S cts-$cluster_name bash +fi diff --git a/cts/lab/CIB.py b/cts/lab/CIB.py deleted file mode 100644 index 5981654..0000000 --- a/cts/lab/CIB.py +++ /dev/null @@ -1,518 +0,0 @@ -""" CIB generator for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import os -import warnings -import tempfile - -from pacemaker.buildoptions import BuildOptions -from pacemaker._cts.CTS import CtsLab - - -class CibBase(object): - def __init__(self, Factory, tag, _id, **kwargs): - self.tag = tag - self.name = _id - self.kwargs = kwargs - self.children = [] - self.Factory = Factory - - def __repr__(self): - return "%s-%s" % (self.tag, self.name) - - def add_child(self, child): - self.children.append(child) - - def 
__setitem__(self, key, value): - if value: - self.kwargs[key] = value - else: - self.kwargs.pop(key, None) - -from cts.cib_xml import * - - -class ConfigBase(object): - cts_cib = None - version = "unknown" - Factory = None - - def __init__(self, CM, factory, tmpfile=None): - self.CM = CM - self.Factory = factory - - if not tmpfile: - warnings.filterwarnings("ignore") - f=tempfile.NamedTemporaryFile(delete=True) - f.close() - tmpfile = f.name - warnings.resetwarnings() - - self.Factory.tmpfile = tmpfile - - def version(self): - return self.version - - def NextIP(self): - ip = self.CM.Env["IPBase"] - if ":" in ip: - (prefix, sep, suffix) = ip.rpartition(":") - suffix = str(hex(int(suffix, 16)+1)).lstrip("0x") - else: - (prefix, sep, suffix) = ip.rpartition(".") - suffix = str(int(suffix)+1) - - ip = prefix + sep + suffix - self.CM.Env["IPBase"] = ip - return ip.strip() - - -class CIB12(ConfigBase): - version = "pacemaker-1.2" - counter = 1 - - def _show(self, command=""): - output = "" - (_, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, verbose=1) - for line in result: - output += line - self.Factory.debug("Generated Config: "+line) - return output - - def NewIP(self, name=None, standard="ocf"): - if self.CM.Env["IPagent"] == "IPaddr2": - ip = self.NextIP() - if not name: - if ":" in ip: - (prefix, sep, suffix) = ip.rpartition(":") - name = "r"+suffix - else: - name = "r"+ip - - r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) - r["ip"] = ip - - if ":" in ip: - r["cidr_netmask"] = "64" - r["nic"] = "eth0" - else: - r["cidr_netmask"] = "32" - - else: - if not name: - name = "r%s%d" % (self.CM.Env["IPagent"], self.counter) - self.counter = self.counter + 1 - r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) - - r.add_op("monitor", "5s") - return r - - def get_node_id(self, node_name): - """ Check the cluster configuration for a node ID. """ - - # We can't account for every possible configuration, - # so we only return a node ID if: - # * The node is specified in /etc/corosync/corosync.conf - # with "ring0_addr:" equal to node_name and "nodeid:" - # explicitly specified. - # In all other cases, we return 0. 
- node_id = 0 - - # awkward command: use } as record separator - # so each corosync.conf "object" is one record; - # match the "node {" record that has "ring0_addr: node_name"; - # then print the substring of that record after "nodeid:" - (rc, output) = self.Factory.rsh(self.Factory.target, - r"""awk -v RS="}" """ - r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/""" - r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s""" - % (node_name, BuildOptions.COROSYNC_CONFIG_FILE), verbose=1) - - if rc == 0 and len(output) == 1: - try: - node_id = int(output[0]) - except ValueError: - node_id = 0 - - return node_id - - def install(self, target): - old = self.Factory.tmpfile - - # Force a rebuild - self.cts_cib = None - - self.Factory.tmpfile = BuildOptions.CIB_DIR + "/cib.xml" - self.contents(target) - self.Factory.rsh(self.Factory.target, "chown " + BuildOptions.DAEMON_USER + " " + self.Factory.tmpfile) - - self.Factory.tmpfile = old - - def contents(self, target=None): - # fencing resource - if self.cts_cib: - return self.cts_cib - - if target: - self.Factory.target = target - - self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile)) - self.num_nodes = len(self.CM.Env["nodes"]) - - no_quorum = "stop" - if self.num_nodes < 3: - no_quorum = "ignore" - self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes) - - # We don't need a nodes section unless we add attributes - stn = None - - # Fencing resource - # Define first so that the shell doesn't reject every update - if self.CM.Env["DoFencing"]: - - # Define the "real" fencing device - st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith") - - # Set a threshold for unreliable stonith devices such as the vmware one - st.add_meta("migration-threshold", "5") - st.add_op("monitor", "120s", timeout="120s") - st.add_op("stop", "0", timeout="60s") - st.add_op("start", "0", timeout="60s") - - # For remote node tests, a cluster node is stopped and brought back up - # as a remote node with the name "remote-OLDNAME". To allow fencing - # devices to fence these nodes, create a list of all possible node names. 
- all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote-') ] - - # Add all parameters specified by user - entries = self.CM.Env["stonith-params"].split(',') - for entry in entries: - try: - (name, value) = entry.split('=', 1) - except ValueError: - print("Warning: skipping invalid fencing parameter: %s" % entry) - continue - - # Allow user to specify "all" as the node list, and expand it here - if name in [ "hostlist", "pcmk_host_list" ] and value == "all": - value = ' '.join(all_node_names) - - st[name] = value - - st.commit() - - # Test advanced fencing logic - if True: - stf_nodes = [] - stt_nodes = [] - attr_nodes = {} - - # Create the levels - stl = FencingTopology(self.Factory) - for node in self.CM.Env["nodes"]: - # Remote node tests will rename the node - remote_node = "remote-" + node - - # Randomly assign node to a fencing method - ftype = self.CM.Env.random_gen.choice(["levels-and", "levels-or ", "broadcast "]) - - # For levels-and, randomly choose targeting by node name or attribute - by = "" - if ftype == "levels-and": - node_id = self.get_node_id(node) - if node_id == 0 or self.CM.Env.random_gen.choice([True, False]): - by = " (by name)" - else: - attr_nodes[node] = node_id - by = " (by attribute)" - - self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by)) - - if ftype == "levels-and": - # If targeting by name, add a topology level for this node - if node not in attr_nodes: - stl.level(1, node, "FencingPass,Fencing") - - # Always target remote nodes by name, otherwise we would need to add - # an attribute to the remote node only during remote tests (we don't - # want nonexistent remote nodes showing up in the non-remote tests). - # That complexity is not worth the effort. - stl.level(1, remote_node, "FencingPass,Fencing") - - # Add the node (and its remote equivalent) to the list of levels-and nodes. - stt_nodes.extend([node, remote_node]) - - elif ftype == "levels-or ": - for n in [ node, remote_node ]: - stl.level(1, n, "FencingFail") - stl.level(2, n, "Fencing") - stf_nodes.extend([node, remote_node]) - - # If any levels-and nodes were targeted by attribute, - # create the attributes and a level for the attribute. 
- if attr_nodes: - stn = Nodes(self.Factory) - for (node_name, node_id) in list(attr_nodes.items()): - stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" }) - stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and") - - # Create a Dummy agent that always passes for levels-and - if len(stt_nodes): - stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith") - stt["pcmk_host_list"] = " ".join(stt_nodes) - # Wait this many seconds before doing anything, handy for letting disks get flushed too - stt["random_sleep_range"] = "30" - stt["mode"] = "pass" - stt.commit() - - # Create a Dummy agent that always fails for levels-or - if len(stf_nodes): - stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith") - stf["pcmk_host_list"] = " ".join(stf_nodes) - # Wait this many seconds before doing anything, handy for letting disks get flushed too - stf["random_sleep_range"] = "30" - stf["mode"] = "fail" - stf.commit() - - # Now commit the levels themselves - stl.commit() - - o = Option(self.Factory) - o["stonith-enabled"] = self.CM.Env["DoFencing"] - o["start-failure-is-fatal"] = "false" - o["pe-input-series-max"] = "5000" - o["shutdown-escalation"] = "5min" - o["batch-limit"] = "10" - o["dc-deadtime"] = "5s" - o["no-quorum-policy"] = no_quorum - - if self.CM.Env["DoBSC"]: - o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!" - - o.commit() - - o = OpDefaults(self.Factory) - o["timeout"] = "90s" - o.commit() - - # Commit the nodes section if we defined one - if stn is not None: - stn.commit() - - # Add an alerts section if possible - if self.Factory.rsh.exists_on_all(self.CM.Env["notification-agent"], self.CM.Env["nodes"]): - alerts = Alerts(self.Factory) - alerts.add_alert(self.CM.Env["notification-agent"], - self.CM.Env["notification-recipient"]) - alerts.commit() - - # Add resources? 
- if self.CM.Env["CIBResource"]: - self.add_resources() - - if self.CM.cluster_monitor == 1: - mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker") - mon.add_op("start", "0", requires="nothing") - mon.add_op("monitor", "5s", requires="nothing") - mon["update"] = "10" - mon["extra_options"] = "-r -n" - mon["user"] = "abeekhof" - mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html" - mon.commit() - - #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''') - - # generate cib - self.cts_cib = self._show() - - if self.Factory.tmpfile != BuildOptions.CIB_DIR + "/cib.xml": - self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile) - - return self.cts_cib - - def add_resources(self): - # Per-node resources - for node in self.CM.Env["nodes"]: - name = "rsc_"+node - r = self.NewIP(name) - r.prefer(node, "100") - r.commit() - - # Migrator - # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach - m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker") - m["passwd"] = "whatever" - m.add_meta("resource-stickiness","1") - m.add_meta("allow-migrate", "1") - m.add_op("monitor", "P10S") - m.commit() - - # Ping the test exerciser - p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker") - p.add_op("monitor", "60s") - p["host_list"] = self.CM.Env["cts-exerciser"] - p["name"] = "connected" - p["debug"] = "true" - - c = Clone(self.Factory, "Connectivity", p) - c["globally-unique"] = "false" - c.commit() - - # promotable clone resource - s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker") - s.add_op("monitor", "15s", timeout="60s") - s.add_op("monitor", "16s", timeout="60s", role="Promoted") - ms = Clone(self.Factory, "promotable-1", s) - ms["promotable"] = "true" - ms["clone-max"] = self.num_nodes - ms["clone-node-max"] = 1 - ms["promoted-max"] = 1 - ms["promoted-node-max"] = 1 - - # Require connectivity to run the promotable clone - r = Rule(self.Factory, "connected", "-INFINITY", op="or") - r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1")) - r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None)) - ms.prefer("connected", rule=r) - - ms.commit() - - # Group Resource - g = Group(self.Factory, "group-1") - g.add_child(self.NewIP()) - - if self.CM.Env["have_systemd"]: - sysd = Resource(self.Factory, "petulant", - "pacemaker-cts-dummyd@10", "service") - sysd.add_op("monitor", "P10S") - g.add_child(sysd) - else: - g.add_child(self.NewIP()) - - g.add_child(self.NewIP()) - - # Make group depend on the promotable clone - g.after("promotable-1", first="promote", then="start") - g.colocate("promotable-1", "INFINITY", withrole="Promoted") - - g.commit() - - # LSB resource - lsb = Resource(self.Factory, "lsb-dummy", "LSBDummy", "lsb") - lsb.add_op("monitor", "5s") - - # LSB with group - lsb.after("group-1") - lsb.colocate("group-1") - - lsb.commit() - - -class CIB20(CIB12): - version = "pacemaker-2.5" - -class CIB30(CIB12): - version = "pacemaker-3.7" - -#class HASI(CIB10): -# def add_resources(self): -# # DLM resource -# self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') -# self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') - - # O2CB resource -# self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') -# self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') -# self._create('''colocation 
o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') -# self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') - - -class ConfigFactory(object): - def __init__(self, CM): - self.CM = CM - self.rsh = self.CM.rsh - self.register("pacemaker12", CIB12, CM, self) - self.register("pacemaker20", CIB20, CM, self) - self.register("pacemaker30", CIB30, CM, self) -# self.register("hae", HASI, CM, self) - if not self.CM.Env["ListTests"]: - self.target = self.CM.Env["nodes"][0] - self.tmpfile = None - - def log(self, args): - self.CM.log("cib: %s" % args) - - def debug(self, args): - self.CM.debug("cib: %s" % args) - - def register(self, methodName, constructor, *args, **kargs): - """register a constructor""" - _args = [constructor] - _args.extend(args) - setattr(self, methodName, ConfigFactoryItem(*_args, **kargs)) - - def unregister(self, methodName): - """unregister a constructor""" - delattr(self, methodName) - - def createConfig(self, name="pacemaker-1.0"): - if name == "pacemaker-1.0": - name = "pacemaker10"; - elif name == "pacemaker-1.2": - name = "pacemaker12"; - elif name == "pacemaker-2.0": - name = "pacemaker20"; - elif name.startswith("pacemaker-3."): - name = "pacemaker30"; - elif name == "hasi": - name = "hae"; - - if hasattr(self, name): - return getattr(self, name)() - else: - self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name) - - return self.pacemaker30() - - -class ConfigFactoryItem(object): - def __init__(self, function, *args, **kargs): - self._function = function - self._args = args - self._kargs = kargs - - def __call__(self, *args, **kargs): - """call function""" - _args = list(self._args) - _args.extend(args) - _kargs = self._kargs.copy() - _kargs.update(kargs) - return self._function(*_args,**_kargs) - -if __name__ == '__main__': - """ Unit test (pass cluster node names as command line arguments) """ - - import cts.CM_corosync - import sys - - if len(sys.argv) < 2: - print("Usage: %s ..." 
% sys.argv[0]) - sys.exit(1) - - args = [ - "--nodes", " ".join(sys.argv[1:]), - "--clobber-cib", - "--populate-resources", - "--stack", "corosync", - "--test-ip-base", "fe80::1234:56:7890:1000", - "--stonith", "rhcs", - ] - env = CtsLab(args) - cm = CM_corosync.crm_corosync() - CibFactory = ConfigFactory(cm) - cib = CibFactory.createConfig("pacemaker-3.0") - print(cib.contents()) diff --git a/cts/lab/CM_corosync.py b/cts/lab/CM_corosync.py deleted file mode 100644 index dce7e98..0000000 --- a/cts/lab/CM_corosync.py +++ /dev/null @@ -1,60 +0,0 @@ -""" Corosync-specific class for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2007-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -from cts.ClusterManager import ClusterManager - -from pacemaker._cts.CTS import Process -from pacemaker._cts.patterns import PatternSelector - -class crm_corosync(ClusterManager): - ''' - Corosync version 2 cluster manager class - ''' - def __init__(self, name=None): - if not name: name="crm-corosync" - ClusterManager.__init__(self) - - self.fullcomplist = {} - self.templates = PatternSelector(self.name) - - def Components(self): - complist = [] - if not len(list(self.fullcomplist.keys())): - for c in [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ]: - self.fullcomplist[c] = Process( - self, c, - pats = self.templates.get_component(c), - badnews_ignore = self.templates.get_component("%s-ignore" % c) + - self.templates.get_component("common-ignore")) - - # the scheduler uses dc_pats instead of pats - self.fullcomplist["pacemaker-schedulerd"] = Process( - self, "pacemaker-schedulerd", - dc_pats = self.templates.get_component("pacemaker-schedulerd"), - badnews_ignore = self.templates.get_component("pacemaker-schedulerd-ignore") + - self.templates.get_component("common-ignore")) - - # add (or replace) extra components - self.fullcomplist["corosync"] = Process( - self, "corosync", - pats = self.templates.get_component("corosync"), - badnews_ignore = self.templates.get_component("corosync-ignore") + - self.templates.get_component("common-ignore") - ) - - # Processes running under valgrind can't be shot with "killall -9 processname", - # so don't include them in the returned list - vgrind = self.Env["valgrind-procs"].split() - for key in list(self.fullcomplist.keys()): - if self.Env["valgrind-tests"]: - if key in vgrind: - self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) - continue - if key == "pacemaker-fenced" and not self.Env["DoFencing"]: - continue - complist.append(self.fullcomplist[key]) - - return complist diff --git a/cts/lab/CTSaudits.py b/cts/lab/CTSaudits.py deleted file mode 100755 index 51a04f8..0000000 --- a/cts/lab/CTSaudits.py +++ /dev/null @@ -1,879 +0,0 @@ -""" Auditing classes for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import time, re, uuid - -from pacemaker.buildoptions import BuildOptions -from pacemaker._cts.watcher import LogKind, LogWatcher - -class ClusterAudit(object): - - def __init__(self, cm): - self.CM = cm - - def __call__(self): - raise ValueError("Abstract Class member (__call__)") - - def is_applicable(self): - '''Return TRUE if we are applicable in the current test configuration''' - raise ValueError("Abstract 
Class member (is_applicable)") - return 1 - - def log(self, args): - self.CM.log("audit: %s" % args) - - def debug(self, args): - self.CM.debug("audit: %s" % args) - - def name(self): - raise ValueError("Abstract Class member (name)") - -AllAuditClasses = [ ] - - -class LogAudit(ClusterAudit): - - def name(self): - return "LogAudit" - - def __init__(self, cm): - self.CM = cm - - def RestartClusterLogging(self, nodes=None): - if not nodes: - nodes = self.CM.Env["nodes"] - - self.CM.debug("Restarting logging on: %s" % repr(nodes)) - - for node in nodes: - if self.CM.Env["have_systemd"]: - (rc, _) = self.CM.rsh(node, "systemctl stop systemd-journald.socket") - if rc != 0: - self.CM.log ("ERROR: Cannot stop 'systemd-journald' on %s" % node) - - (rc, _) = self.CM.rsh(node, "systemctl start systemd-journald.service") - if rc != 0: - self.CM.log ("ERROR: Cannot start 'systemd-journald' on %s" % node) - - (rc, _) = self.CM.rsh(node, "service %s restart" % self.CM.Env["syslogd"]) - if rc != 0: - self.CM.log ("ERROR: Cannot restart '%s' on %s" % (self.CM.Env["syslogd"], node)) - - def _create_watcher(self, patterns, kind): - watch = LogWatcher(self.CM.Env["LogFileName"], patterns, - self.CM.Env["nodes"], kind, "LogAudit", 5, - silent=True) - watch.set_watch() - return watch - - def TestLogging(self): - patterns = [] - prefix = "Test message from" - suffix = str(uuid.uuid4()) - watch = {} - - for node in self.CM.Env["nodes"]: - # Look for the node name in two places to make sure - # that syslog is logging with the correct hostname - m = re.search("^([^.]+).*", node) - if m: - simple = m.group(1) - else: - simple = node - patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix)) - - watch_pref = self.CM.Env["LogWatcher"] - if watch_pref == LogKind.ANY: - kinds = [ LogKind.FILE ] - if self.CM.Env["have_systemd"]: - kinds += [ LogKind.JOURNAL ] - kinds += [ LogKind.REMOTE_FILE ] - for k in kinds: - watch[k] = self._create_watcher(patterns, k) - self.CM.log("Logging test message with identifier %s" % (suffix)) - else: - watch[watch_pref] = self._create_watcher(patterns, watch_pref) - - for node in self.CM.Env["nodes"]: - cmd = "logger -p %s.info %s %s %s" % (self.CM.Env["SyslogFacility"], prefix, node, suffix) - - (rc, _) = self.CM.rsh(node, cmd, synchronous=False, verbose=0) - if rc != 0: - self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node)) - - for k in list(watch.keys()): - w = watch[k] - if watch_pref == LogKind.ANY: - self.CM.log("Checking for test message in %s logs" % (k)) - w.look_for_all(silent=True) - if w.unmatched: - for regex in w.unmatched: - self.CM.log("Test message [%s] not found in %s logs" % (regex, w.kind)) - else: - if watch_pref == LogKind.ANY: - self.CM.log("Found test message in %s logs" % (k)) - self.CM.Env["LogWatcher"] = k - return 1 - - return 0 - - def __call__(self): - max = 3 - attempt = 0 - - self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"]) - while attempt <= max and self.TestLogging() == 0: - attempt = attempt + 1 - self.RestartClusterLogging() - time.sleep(60*attempt) - - if attempt > max: - self.CM.log("ERROR: Cluster logging unrecoverable.") - return 0 - - return 1 - - def is_applicable(self): - if self.CM.Env["DoBSC"]: - return 0 - if self.CM.Env["LogAuditDisabled"]: - return 0 - return 1 - - -class DiskAudit(ClusterAudit): - - def name(self): - return "DiskspaceAudit" - - def __init__(self, cm): - self.CM = cm - - def __call__(self): - result = 1 - # @TODO Use directory of PCMK_logfile if set on host - dfcmd = "df -BM " + 
BuildOptions.LOG_DIR + " | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%'" - - self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"]) - for node in self.CM.Env["nodes"]: - (_, dfout) = self.CM.rsh(node, dfcmd, verbose=1) - if not dfout: - self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node)) - else: - dfout = dfout[0].strip() - - try: - (used, remain) = dfout.split() - used_percent = int(used) - remaining_mb = int(remain) - except (ValueError, TypeError): - self.CM.log("Warning: df output '%s' from %s was invalid [%s, %s]" - % (dfout, node, used, remain)) - else: - if remaining_mb < 10 or used_percent > 95: - self.CM.log("CRIT: Out of log disk space on %s (%d%% / %dMB)" - % (node, used_percent, remaining_mb)) - result = None - if self.CM.Env["continue"]: - answer = "Y" - else: - try: - answer = input('Continue? [nY]') - except EOFError as e: - answer = "n" - - if answer and answer == "n": - raise ValueError("Disk full on %s" % (node)) - - elif remaining_mb < 100 or used_percent > 90: - self.CM.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node)) - return result - - def is_applicable(self): - if self.CM.Env["DoBSC"]: - return 0 - return 1 - - -class FileAudit(ClusterAudit): - - def name(self): - return "FileAudit" - - def __init__(self, cm): - self.CM = cm - self.known = [] - - def __call__(self): - result = 1 - - self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"]) - for node in self.CM.Env["nodes"]: - - (_, lsout) = self.CM.rsh(node, "ls -al /var/lib/pacemaker/cores/* | grep core.[0-9]", verbose=1) - for line in lsout: - line = line.strip() - if line not in self.known: - result = 0 - self.known.append(line) - self.CM.log("Warning: Pacemaker core file on %s: %s" % (node, line)) - - (_, lsout) = self.CM.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", verbose=1) - for line in lsout: - line = line.strip() - if line not in self.known: - result = 0 - self.known.append(line) - self.CM.log("Warning: Corosync core file on %s: %s" % (node, line)) - - if node in self.CM.ShouldBeStatus and self.CM.ShouldBeStatus[node] == "down": - clean = 0 - (_, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", verbose=1) - for line in lsout: - result = 0 - clean = 1 - self.CM.log("Warning: Stale IPC file on %s: %s" % (node, line)) - - if clean: - (_, lsout) = self.CM.rsh(node, "ps axf | grep -e pacemaker -e corosync", verbose=1) - for line in lsout: - self.CM.debug("ps[%s]: %s" % (node, line)) - - self.CM.rsh(node, "rm -rf /dev/shm/qb-*") - - else: - self.CM.debug("Skipping %s" % node) - - return result - - def is_applicable(self): - return 1 - - -class AuditResource(object): - def __init__(self, cm, line): - fields = line.split() - self.CM = cm - self.line = line - self.type = fields[1] - self.id = fields[2] - self.clone_id = fields[3] - self.parent = fields[4] - self.rprovider = fields[5] - self.rclass = fields[6] - self.rtype = fields[7] - self.host = fields[8] - self.needs_quorum = fields[9] - self.flags = int(fields[10]) - self.flags_s = fields[11] - - if self.parent == "NA": - self.parent = None - - def unique(self): - if self.flags & int("0x00000020", 16): - return 1 - return 0 - - def orphan(self): - if self.flags & int("0x00000001", 16): - return 1 - return 0 - - def managed(self): - if self.flags & int("0x00000002", 16): - return 1 - return 0 - - -class AuditConstraint(object): - def __init__(self, cm, line): - fields = line.split() - self.CM = cm - self.line = line - self.type = fields[1] - self.id = fields[2] - self.rsc = fields[3] - 
self.target = fields[4] - self.score = fields[5] - self.rsc_role = fields[6] - self.target_role = fields[7] - - if self.rsc_role == "NA": - self.rsc_role = None - if self.target_role == "NA": - self.target_role = None - - -class PrimitiveAudit(ClusterAudit): - def name(self): - return "PrimitiveAudit" - - def __init__(self, cm): - self.CM = cm - - def doResourceAudit(self, resource, quorum): - rc = 1 - active = self.CM.ResourceLocation(resource.id) - - if len(active) == 1: - if quorum: - self.debug("Resource %s active on %s" % (resource.id, repr(active))) - - elif resource.needs_quorum == 1: - self.CM.log("Resource %s active without quorum: %s" - % (resource.id, repr(active))) - rc = 0 - - elif not resource.managed(): - self.CM.log("Resource %s not managed. Active on %s" - % (resource.id, repr(active))) - - elif not resource.unique(): - # TODO: Figure out a clever way to actually audit these resource types - if len(active) > 1: - self.debug("Non-unique resource %s is active on: %s" - % (resource.id, repr(active))) - else: - self.debug("Non-unique resource %s is not active" % resource.id) - - elif len(active) > 1: - self.CM.log("Resource %s is active multiple times: %s" - % (resource.id, repr(active))) - rc = 0 - - elif resource.orphan(): - self.debug("Resource %s is an inactive orphan" % resource.id) - - elif len(self.inactive_nodes) == 0: - self.CM.log("WARN: Resource %s not served anywhere" % resource.id) - rc = 0 - - elif self.CM.Env["warn-inactive"]: - if quorum or not resource.needs_quorum: - self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)" - % (resource.id, repr(self.inactive_nodes))) - else: - self.debug("Resource %s not served anywhere (Inactive nodes: %s)" - % (resource.id, repr(self.inactive_nodes))) - - elif quorum or not resource.needs_quorum: - self.debug("Resource %s not served anywhere (Inactive nodes: %s)" - % (resource.id, repr(self.inactive_nodes))) - - return rc - - def setup(self): - self.target = None - self.resources = [] - self.constraints = [] - self.active_nodes = [] - self.inactive_nodes = [] - - for node in self.CM.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "up": - self.active_nodes.append(node) - else: - self.inactive_nodes.append(node) - - for node in self.CM.Env["nodes"]: - if self.target == None and self.CM.ShouldBeStatus[node] == "up": - self.target = node - - if not self.target: - # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource - # with CIB_file=/path/to/cib.xml even when the cluster isn't running - self.debug("No nodes active - skipping %s" % self.name()) - return 0 - - (_, lines) = self.CM.rsh(self.target, "crm_resource -c", verbose=1) - - for line in lines: - if re.search("^Resource", line): - self.resources.append(AuditResource(self.CM, line)) - elif re.search("^Constraint", line): - self.constraints.append(AuditConstraint(self.CM, line)) - else: - self.CM.log("Unknown entry: %s" % line); - - return 1 - - def __call__(self): - rc = 1 - - if not self.setup(): - return 1 - - quorum = self.CM.HasQuorum(None) - for resource in self.resources: - if resource.type == "primitive": - if self.doResourceAudit(resource, quorum) == 0: - rc = 0 - return rc - - def is_applicable(self): - # @TODO Due to long-ago refactoring, this name test would never match, - # so this audit (and those derived from it) would never run. - # Uncommenting the next lines fixes the name test, but that then - # exposes pre-existing bugs that need to be fixed. 
- #if self.CM["Name"] == "crm-corosync": - # return 1 - return 0 - - -class GroupAudit(PrimitiveAudit): - def name(self): - return "GroupAudit" - - def __call__(self): - rc = 1 - if not self.setup(): - return 1 - - for group in self.resources: - if group.type == "group": - first_match = 1 - group_location = None - for child in self.resources: - if child.parent == group.id: - nodes = self.CM.ResourceLocation(child.id) - - if first_match and len(nodes) > 0: - group_location = nodes[0] - - first_match = 0 - - if len(nodes) > 1: - rc = 0 - self.CM.log("Child %s of %s is active more than once: %s" - % (child.id, group.id, repr(nodes))) - - elif len(nodes) == 0: - # Groups are allowed to be partially active - # However we do need to make sure later children aren't running - group_location = None - self.debug("Child %s of %s is stopped" % (child.id, group.id)) - - elif nodes[0] != group_location: - rc = 0 - self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s" - % (child.id, group.id, nodes[0], group_location)) - else: - self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0])) - - return rc - - -class CloneAudit(PrimitiveAudit): - def name(self): - return "CloneAudit" - - def __call__(self): - rc = 1 - if not self.setup(): - return 1 - - for clone in self.resources: - if clone.type == "clone": - for child in self.resources: - if child.parent == clone.id and child.type == "primitive": - self.debug("Checking child %s of %s..." % (child.id, clone.id)) - # Check max and node_max - # Obtain with: - # crm_resource -g clone_max --meta -r child.id - # crm_resource -g clone_node_max --meta -r child.id - - return rc - - -class ColocationAudit(PrimitiveAudit): - def name(self): - return "ColocationAudit" - - def crm_location(self, resource): - (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, verbose=1) - hosts = [] - if rc == 0: - for line in lines: - fields = line.split() - hosts.append(fields[0]) - - return hosts - - def __call__(self): - rc = 1 - if not self.setup(): - return 1 - - for coloc in self.constraints: - if coloc.type == "rsc_colocation": - source = self.crm_location(coloc.rsc) - target = self.crm_location(coloc.target) - if len(source) == 0: - self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc)) - else: - for node in source: - if not node in target: - rc = 0 - self.CM.log("Colocation audit (%s): %s running on %s (not in %s)" - % (coloc.id, coloc.rsc, node, repr(target))) - else: - self.debug("Colocation audit (%s): %s running on %s (in %s)" - % (coloc.id, coloc.rsc, node, repr(target))) - - return rc - - -class ControllerStateAudit(ClusterAudit): - def __init__(self, cm): - self.CM = cm - self.Stats = {"calls":0 - , "success":0 - , "failure":0 - , "skipped":0 - , "auditfail":0} - - def has_key(self, key): - return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): - return self.Stats[key] - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - def __call__(self): - passed = 1 - up_are_down = 0 - down_are_up = 0 - unstable_list = [] - - for node in self.CM.Env["nodes"]: - should_be = self.CM.ShouldBeStatus[node] - rc = self.CM.test_node_CM(node) - if rc > 0: - if should_be == "down": - down_are_up = down_are_up + 1 - if rc == 1: - unstable_list.append(node) - elif should_be == "up": - up_are_down = 
up_are_down + 1 - - if len(unstable_list) > 0: - passed = 0 - self.CM.log("Cluster is not stable: %d (of %d): %s" - % (len(unstable_list), self.CM.upcount(), repr(unstable_list))) - - if up_are_down > 0: - passed = 0 - self.CM.log("%d (of %d) nodes expected to be up were down." - % (up_are_down, len(self.CM.Env["nodes"]))) - - if down_are_up > 0: - passed = 0 - self.CM.log("%d (of %d) nodes expected to be down were up." - % (down_are_up, len(self.CM.Env["nodes"]))) - - return passed - - def name(self): - return "ControllerStateAudit" - - def is_applicable(self): - # @TODO Due to long-ago refactoring, this name test would never match, - # so this audit (and those derived from it) would never run. - # Uncommenting the next lines fixes the name test, but that then - # exposes pre-existing bugs that need to be fixed. - #if self.CM["Name"] == "crm-corosync": - # return 1 - return 0 - - -class CIBAudit(ClusterAudit): - def __init__(self, cm): - self.CM = cm - self.Stats = {"calls":0 - , "success":0 - , "failure":0 - , "skipped":0 - , "auditfail":0} - - def has_key(self, key): - return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): - return self.Stats[key] - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - def __call__(self): - passed = 1 - ccm_partitions = self.CM.find_partitions() - - if len(ccm_partitions) == 0: - self.debug("\tNo partitions to audit") - return 1 - - for partition in ccm_partitions: - self.debug("\tAuditing CIB consistency for: %s" % partition) - partition_passed = 0 - if self.audit_cib_contents(partition) == 0: - passed = 0 - - return passed - - def audit_cib_contents(self, hostlist): - passed = 1 - node0 = None - node0_xml = None - - partition_hosts = hostlist.split() - for node in partition_hosts: - node_xml = self.store_remote_cib(node, node0) - - if node_xml == None: - self.CM.log("Could not perform audit: No configuration from %s" % node) - passed = 0 - - elif node0 == None: - node0 = node - node0_xml = node_xml - - elif node0_xml == None: - self.CM.log("Could not perform audit: No configuration from %s" % node0) - passed = 0 - - else: - (rc, result) = self.CM.rsh( - node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), verbose=1) - - if rc != 0: - self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc)) - passed = 0 - - for line in result: - if not re.search("", line): - passed = 0 - self.debug("CibDiff[%s-%s]: %s" % (node0, node, line)) - else: - self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line)) - -# self.CM.rsh(node0, "rm -f %s" % node_xml) -# self.CM.rsh(node0, "rm -f %s" % node0_xml) - return passed - - def store_remote_cib(self, node, target): - combined = "" - filename = "/tmp/ctsaudit.%s.xml" % node - - if not target: - target = node - - (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], verbose=1) - if rc != 0: - self.CM.log("Could not retrieve configuration") - return None - - self.CM.rsh("localhost", "rm -f %s" % filename) - for line in lines: - self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), verbose=0) - - if self.CM.rsh.copy(filename, "root@%s:%s" % (target, filename), silent=True) != 0: - self.CM.log("Could not store configuration") - return None - return filename - - def name(self): - return "CibAudit" - - def is_applicable(self): - # @TODO Due to long-ago refactoring, 
this name test would never match, - # so this audit (and those derived from it) would never run. - # Uncommenting the next lines fixes the name test, but that then - # exposes pre-existing bugs that need to be fixed. - #if self.CM["Name"] == "crm-corosync": - # return 1 - return 0 - - -class PartitionAudit(ClusterAudit): - def __init__(self, cm): - self.CM = cm - self.Stats = {"calls":0 - , "success":0 - , "failure":0 - , "skipped":0 - , "auditfail":0} - self.NodeEpoch = {} - self.NodeState = {} - self.NodeQuorum = {} - - def has_key(self, key): - return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): - return self.Stats[key] - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - def __call__(self): - passed = 1 - ccm_partitions = self.CM.find_partitions() - - if ccm_partitions == None or len(ccm_partitions) == 0: - return 1 - - self.CM.cluster_stable(double_check=True) - - if len(ccm_partitions) != self.CM.partitions_expected: - self.CM.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions)) - passed = 0 - for partition in ccm_partitions: - self.CM.log("\t %s" % partition) - - for partition in ccm_partitions: - partition_passed = 0 - if self.audit_partition(partition) == 0: - passed = 0 - - return passed - - def trim_string(self, avalue): - if not avalue: - return None - if len(avalue) > 1: - return avalue[:-1] - - def trim2int(self, avalue): - if not avalue: - return None - if len(avalue) > 1: - return int(avalue[:-1]) - - def audit_partition(self, partition): - passed = 1 - dc_found = [] - dc_allowed_list = [] - lowest_epoch = None - node_list = partition.split() - - self.debug("Auditing partition: %s" % (partition)) - for node in node_list: - if self.CM.ShouldBeStatus[node] != "up": - self.CM.log("Warn: Node %s appeared out of nowhere" % (node)) - self.CM.ShouldBeStatus[node] = "up" - # not in itself a reason to fail the audit (not what we're - # checking for in this audit) - - (_, out) = self.CM.rsh(node, self.CM["StatusCmd"] % node, verbose=1) - self.NodeState[node] = out[0].strip() - - (_, out) = self.CM.rsh(node, self.CM["EpochCmd"], verbose=1) - self.NodeEpoch[node] = out[0].strip() - - (_, out) = self.CM.rsh(node, self.CM["QuorumCmd"], verbose=1) - self.NodeQuorum[node] = out[0].strip() - - self.debug("Node %s: %s - %s - %s." 
% (node, self.NodeState[node], self.NodeEpoch[node], self.NodeQuorum[node])) - self.NodeState[node] = self.trim_string(self.NodeState[node]) - self.NodeEpoch[node] = self.trim2int(self.NodeEpoch[node]) - self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node]) - - if not self.NodeEpoch[node]: - self.CM.log("Warn: Node %s dissappeared: cant determin epoch" % (node)) - self.CM.ShouldBeStatus[node] = "down" - # not in itself a reason to fail the audit (not what we're - # checking for in this audit) - elif lowest_epoch == None or self.NodeEpoch[node] < lowest_epoch: - lowest_epoch = self.NodeEpoch[node] - - if not lowest_epoch: - self.CM.log("Lowest epoch not determined in %s" % (partition)) - passed = 0 - - for node in node_list: - if self.CM.ShouldBeStatus[node] == "up": - if self.CM.is_node_dc(node, self.NodeState[node]): - dc_found.append(node) - if self.NodeEpoch[node] == lowest_epoch: - self.debug("%s: OK" % node) - elif not self.NodeEpoch[node]: - self.debug("Check on %s ignored: no node epoch" % node) - elif not lowest_epoch: - self.debug("Check on %s ignored: no lowest epoch" % node) - else: - self.CM.log("DC %s is not the oldest node (%d vs. %d)" - % (node, self.NodeEpoch[node], lowest_epoch)) - passed = 0 - - if len(dc_found) == 0: - self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)" - % (len(dc_allowed_list), str(dc_allowed_list), str(node_list))) - - elif len(dc_found) > 1: - self.CM.log("%d DCs (%s) found in cluster partition: %s" - % (len(dc_found), str(dc_found), str(node_list))) - passed = 0 - - if passed == 0: - for node in node_list: - if self.CM.ShouldBeStatus[node] == "up": - self.CM.log("epoch %s : %s" - % (self.NodeEpoch[node], self.NodeState[node])) - - return passed - - def name(self): - return "PartitionAudit" - - def is_applicable(self): - # @TODO Due to long-ago refactoring, this name test would never match, - # so this audit (and those derived from it) would never run. - # Uncommenting the next lines fixes the name test, but that then - # exposes pre-existing bugs that need to be fixed. 
- #if self.CM["Name"] == "crm-corosync": - # return 1 - return 0 - -AllAuditClasses.append(DiskAudit) -AllAuditClasses.append(FileAudit) -AllAuditClasses.append(LogAudit) -AllAuditClasses.append(ControllerStateAudit) -AllAuditClasses.append(PartitionAudit) -AllAuditClasses.append(PrimitiveAudit) -AllAuditClasses.append(GroupAudit) -AllAuditClasses.append(CloneAudit) -AllAuditClasses.append(ColocationAudit) -AllAuditClasses.append(CIBAudit) - - -def AuditList(cm): - result = [] - for auditclass in AllAuditClasses: - a = auditclass(cm) - if a.is_applicable(): - result.append(a) - return result diff --git a/cts/lab/CTSlab.py.in b/cts/lab/CTSlab.py.in deleted file mode 100644 index bd990fd..0000000 --- a/cts/lab/CTSlab.py.in +++ /dev/null @@ -1,135 +0,0 @@ -#!@PYTHON@ -""" Command-line interface to Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2001-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import sys, signal, os - -pdir = os.path.dirname(sys.path[0]) -sys.path.insert(0, pdir) # So that things work from the source directory - -try: - from cts.CM_corosync import * - from cts.CTSaudits import AuditList - from cts.CTStests import TestList - from cts.CTSscenarios import * - - from pacemaker._cts.CTS import CtsLab - from pacemaker._cts.logging import LogFactory -except ImportError as e: - sys.stderr.write("abort: %s\n" % e) - sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" % - ' '.join(sys.path)) - sys.exit(1) - -# These are globals so they can be used by the signal handler. -scenario = None -LogFactory().add_stderr() - - -def sig_handler(signum, frame) : - LogFactory().log("Interrupted by signal %d"%signum) - if scenario: scenario.summarize() - if signum == 15 : - if scenario: scenario.TearDown() - sys.exit(1) - - -def plural_s(n, uppercase=False): - if n == 1: - return "" - elif uppercase: - return "S" - else: - return "s" - - -if __name__ == '__main__': - - Environment = CtsLab(sys.argv[1:]) - NumIter = Environment["iterations"] - Tests = [] - - # Set the signal handler - signal.signal(15, sig_handler) - signal.signal(10, sig_handler) - - # Create the Cluster Manager object - cm = None - if Environment["Stack"] == "corosync 2+": - cm = crm_corosync() - - else: - LogFactory().log("Unknown stack: "+Environment["stack"]) - sys.exit(1) - - if Environment["TruncateLog"]: - if Environment["OutputFile"] is None: - LogFactory().log("Ignoring truncate request because no output file specified") - else: - LogFactory().log("Truncating %s" % Environment["OutputFile"]) - with open(Environment["OutputFile"], "w") as outputfile: - outputfile.truncate(0) - - Audits = AuditList(cm) - - if Environment["ListTests"]: - Tests = TestList(cm, Audits) - LogFactory().log("Total %d tests"%len(Tests)) - for test in Tests : - LogFactory().log(str(test.name)); - sys.exit(0) - - elif len(Environment["tests"]) == 0: - Tests = TestList(cm, Audits) - - else: - Chosen = Environment["tests"] - for TestCase in Chosen: - match = None - - for test in TestList(cm, Audits): - if test.name == TestCase: - match = test - - if not match: - LogFactory().log("--choose: No applicable/valid tests chosen") - sys.exit(1) - else: - Tests.append(match) - - # Scenario selection - if Environment["scenario"] == "basic-sanity": - scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests) - - elif Environment["scenario"] == "all-once": - NumIter = len(Tests) - scenario = 
AllOnce( - cm, [ BootCluster(Environment) ], Audits, Tests) - elif Environment["scenario"] == "sequence": - scenario = Sequence( - cm, [ BootCluster(Environment) ], Audits, Tests) - elif Environment["scenario"] == "boot": - scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, []) - else: - scenario = RandomTests( - cm, [ BootCluster(Environment) ], Audits, Tests) - - LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TEST" + plural_s(NumIter, True) + " ") - LogFactory().log("Stack: %s (%s)" % (Environment["Stack"], Environment["Name"])) - LogFactory().log("Schema: %s" % Environment["Schema"]) - LogFactory().log("Scenario: %s" % scenario.__doc__) - LogFactory().log("CTS Exerciser: %s" % Environment["cts-exerciser"]) - LogFactory().log("CTS Logfile: %s" % Environment["OutputFile"]) - LogFactory().log("Random Seed: %s" % Environment["RandSeed"]) - LogFactory().log("Syslog variant: %s" % Environment["syslogd"].strip()) - LogFactory().log("System log files: %s" % Environment["LogFileName"]) - if Environment.has_key("IPBase"): - LogFactory().log("Base IP for resources: %s" % Environment["IPBase"]) - LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"]) - - Environment.dump() - rc = Environment.run(scenario, NumIter) - sys.exit(rc) diff --git a/cts/lab/CTSscenarios.py b/cts/lab/CTSscenarios.py deleted file mode 100644 index 37cb094..0000000 --- a/cts/lab/CTSscenarios.py +++ /dev/null @@ -1,563 +0,0 @@ -""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import os -import re -import sys -import time - -from cts.CTStests import CTSTest -from cts.CTSaudits import ClusterAudit - -from pacemaker._cts.watcher import LogWatcher - -class ScenarioComponent(object): - - def __init__(self, Env): - self.Env = Env - - def IsApplicable(self): - '''Return True if the current ScenarioComponent is applicable - in the given LabEnvironment given to the constructor. - ''' - - raise ValueError("Abstract Class member (IsApplicable)") - - def SetUp(self, CM): - '''Set up the given ScenarioComponent''' - raise ValueError("Abstract Class member (Setup)") - - def TearDown(self, CM): - '''Tear down (undo) the given ScenarioComponent''' - raise ValueError("Abstract Class member (Setup)") - - -class Scenario(object): - ( -'''The basic idea of a scenario is that of an ordered list of -ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn, -and then after the tests have been run, they are torn down using TearDown() -(in reverse order). - -A Scenario is applicable to a particular cluster manager iff each -ScenarioComponent is applicable. - -A partially set up scenario is torn down if it fails during setup. 
-''') - - def __init__(self, ClusterManager, Components, Audits, Tests): - - "Initialize the Scenario from the list of ScenarioComponents" - - self.ClusterManager = ClusterManager - self.Components = Components - self.Audits = Audits - self.Tests = Tests - - self.BadNews = None - self.TestSets = [] - self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0} - self.Sets = [] - - #self.ns=CTS.NodeStatus(self.Env) - - for comp in Components: - if not issubclass(comp.__class__, ScenarioComponent): - raise ValueError("Init value must be subclass of ScenarioComponent") - - for audit in Audits: - if not issubclass(audit.__class__, ClusterAudit): - raise ValueError("Init value must be subclass of ClusterAudit") - - for test in Tests: - if not issubclass(test.__class__, CTSTest): - raise ValueError("Init value must be a subclass of CTSTest") - - def IsApplicable(self): - ( -'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable() -''' - ) - - for comp in self.Components: - if not comp.IsApplicable(): - return None - return True - - def SetUp(self): - '''Set up the Scenario. Return TRUE on success.''' - - self.ClusterManager.prepare() - self.audit() # Also detects remote/local log config - self.ClusterManager.ns.wait_for_all_nodes(self.ClusterManager.Env["nodes"]) - - self.audit() - self.ClusterManager.install_support() - - self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"], - self.ClusterManager.templates.get_patterns("BadNews"), - self.ClusterManager.Env["nodes"], - self.ClusterManager.Env["LogWatcher"], - "BadNews", 0) - self.BadNews.set_watch() # Call after we've figured out what type of log watching to do in LogAudit - - j = 0 - while j < len(self.Components): - if not self.Components[j].SetUp(self.ClusterManager): - # OOPS! We failed. Tear partial setups down. - self.audit() - self.ClusterManager.log("Tearing down partial setup") - self.TearDown(j) - return None - j = j + 1 - - self.audit() - return 1 - - def TearDown(self, max=None): - - '''Tear Down the Scenario - in reverse order.''' - - if max == None: - max = len(self.Components)-1 - j = max - while j >= 0: - self.Components[j].TearDown(self.ClusterManager) - j = j - 1 - - self.audit() - self.ClusterManager.install_support("uninstall") - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - def run(self, Iterations): - self.ClusterManager.oprofileStart() - try: - self.run_loop(Iterations) - self.ClusterManager.oprofileStop() - except: - self.ClusterManager.oprofileStop() - raise - - def run_loop(self, Iterations): - raise ValueError("Abstract Class member (run_loop)") - - def run_test(self, test, testcount): - nodechoice = self.ClusterManager.Env.random_node() - - ret = 1 - where = "" - did_run = 0 - - self.ClusterManager.instance_errorstoignore_clear() - self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]") - - starttime = test.set_timer() - if not test.setup(nodechoice): - self.ClusterManager.log("Setup failed") - ret = 0 - - elif not test.canrunnow(nodechoice): - self.ClusterManager.log("Skipped") - test.skipped() - - else: - did_run = 1 - ret = test(nodechoice) - - if not test.teardown(nodechoice): - self.ClusterManager.log("Teardown failed") - if self.ClusterManager.Env["continue"]: - answer = "Y" - else: - try: - answer = input('Continue? 
[nY]') - except EOFError as e: - answer = "n" - if answer and answer == "n": - raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice)) - ret = 0 - - stoptime = time.time() - self.ClusterManager.oprofileSave(testcount) - - elapsed_time = stoptime - starttime - test_time = stoptime - test.get_timer() - if not test["min_time"]: - test["elapsed_time"] = elapsed_time - test["min_time"] = test_time - test["max_time"] = test_time - else: - test["elapsed_time"] = test["elapsed_time"] + elapsed_time - if test_time < test["min_time"]: - test["min_time"] = test_time - if test_time > test["max_time"]: - test["max_time"] = test_time - - if ret: - self.incr("success") - test.log_timer() - else: - self.incr("failure") - self.ClusterManager.statall() - did_run = 1 # Force the test count to be incremented anyway so test extraction works - - self.audit(test.errorstoignore()) - return did_run - - def summarize(self): - self.ClusterManager.log("****************") - self.ClusterManager.log("Overall Results:" + repr(self.Stats)) - self.ClusterManager.log("****************") - - stat_filter = { - "calls":0, - "failure":0, - "skipped":0, - "auditfail":0, - } - self.ClusterManager.log("Test Summary") - for test in self.Tests: - for key in list(stat_filter.keys()): - stat_filter[key] = test.Stats[key] - self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter)) - - self.ClusterManager.debug("Detailed Results") - for test in self.Tests: - self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats)) - - self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED") - - def audit(self, LocalIgnore=[]): - errcount = 0 - ignorelist = [] - ignorelist.append("CTS:") - ignorelist.extend(LocalIgnore) - ignorelist.extend(self.ClusterManager.errorstoignore()) - ignorelist.extend(self.ClusterManager.instance_errorstoignore()) - - # This makes sure everything is stabilized before starting... - failed = 0 - for audit in self.Audits: - if not audit(): - self.ClusterManager.log("Audit " + audit.name() + " FAILED.") - failed += 1 - else: - self.ClusterManager.debug("Audit " + audit.name() + " passed.") - - while errcount < 1000: - match = None - if self.BadNews: - match = self.BadNews.look(0) - - if match: - add_err = 1 - for ignore in ignorelist: - if add_err == 1 and re.search(ignore, match): - add_err = 0 - if add_err == 1: - self.ClusterManager.log("BadNews: " + match) - self.incr("BadNews") - errcount = errcount + 1 - else: - break - else: - if self.ClusterManager.Env["continue"]: - answer = "Y" - else: - try: - answer = input('Big problems. Continue? 
[nY]') - except EOFError as e: - answer = "n" - if answer and answer == "n": - self.ClusterManager.log("Shutting down.") - self.summarize() - self.TearDown() - raise ValueError("Looks like we hit a BadNews jackpot!") - - if self.BadNews: - self.BadNews.end() - return failed - - -class AllOnce(Scenario): - '''Every Test Once''' # Accessable as __doc__ - def run_loop(self, Iterations): - testcount = 1 - for test in self.Tests: - self.run_test(test, testcount) - testcount += 1 - - -class RandomTests(Scenario): - '''Random Test Execution''' - def run_loop(self, Iterations): - testcount = 1 - while testcount <= Iterations: - test = self.ClusterManager.Env.random_gen.choice(self.Tests) - self.run_test(test, testcount) - testcount += 1 - - -class BasicSanity(Scenario): - '''Basic Cluster Sanity''' - def run_loop(self, Iterations): - testcount = 1 - while testcount <= Iterations: - test = self.Environment.random_gen.choice(self.Tests) - self.run_test(test, testcount) - testcount += 1 - - -class Sequence(Scenario): - '''Named Tests in Sequence''' - def run_loop(self, Iterations): - testcount = 1 - while testcount <= Iterations: - for test in self.Tests: - self.run_test(test, testcount) - testcount += 1 - - -class Boot(Scenario): - '''Start the Cluster''' - def run_loop(self, Iterations): - testcount = 0 - - -class BootCluster(ScenarioComponent): - ( -'''BootCluster is the most basic of ScenarioComponents. -This ScenarioComponent simply starts the cluster manager on all the nodes. -It is fairly robust as it waits for all nodes to come up before starting -as they might have been rebooted or crashed for some reason beforehand. -''') - def __init__(self, Env): - pass - - def IsApplicable(self): - '''BootCluster is so generic it is always Applicable''' - return True - - def SetUp(self, CM): - '''Basic Cluster Manager startup. Start everything''' - - CM.prepare() - - # Clear out the cobwebs ;-) - CM.stopall(verbose=True, force=True) - - # Now start the Cluster Manager on all the nodes. - CM.log("Starting Cluster Manager on all nodes.") - return CM.startall(verbose=True, quick=True) - - def TearDown(self, CM, force=False): - '''Set up the given ScenarioComponent''' - - # Stop the cluster manager everywhere - - CM.log("Stopping Cluster Manager on all nodes") - return CM.stopall(verbose=True, force=force) - - -class LeaveBooted(BootCluster): - def TearDown(self, CM): - '''Set up the given ScenarioComponent''' - - # Stop the cluster manager everywhere - - CM.log("Leaving Cluster running on all nodes") - return 1 - - -class PingFest(ScenarioComponent): - ( -'''PingFest does a flood ping to each node in the cluster from the test machine. - -If the LabEnvironment Parameter PingSize is set, it will be used as the size -of ping packet requested (via the -s option). If it is not set, it defaults -to 1024 bytes. - -According to the manual page for ping: - Outputs packets as fast as they come back or one hundred times per - second, whichever is more. For every ECHO_REQUEST sent a period ``.'' - is printed, while for every ECHO_REPLY received a backspace is printed. - This provides a rapid display of how many packets are being dropped. - Only the super-user may use this option. This can be very hard on a net- - work and should be used with caution. 
-''' ) - - def __init__(self, Env): - self.Env = Env - - def IsApplicable(self): - '''PingFests are always applicable ;-) - ''' - - return True - - def SetUp(self, CM): - '''Start the PingFest!''' - - self.PingSize = 1024 - if "PingSize" in list(CM.Env.keys()): - self.PingSize = CM.Env["PingSize"] - - CM.log("Starting %d byte flood pings" % self.PingSize) - - self.PingPids = [] - for node in CM.Env["nodes"]: - self.PingPids.append(self._pingchild(node)) - - CM.log("Ping PIDs: " + repr(self.PingPids)) - return 1 - - def TearDown(self, CM): - '''Stop it right now! My ears are pinging!!''' - - for pid in self.PingPids: - if pid != None: - CM.log("Stopping ping process %d" % pid) - os.kill(pid, signal.SIGKILL) - - def _pingchild(self, node): - - Args = ["ping", "-qfn", "-s", str(self.PingSize), node] - - sys.stdin.flush() - sys.stdout.flush() - sys.stderr.flush() - pid = os.fork() - - if pid < 0: - self.Env.log("Cannot fork ping child") - return None - if pid > 0: - return pid - - # Otherwise, we're the child process. - - os.execvp("ping", Args) - self.Env.log("Cannot execvp ping: " + repr(Args)) - sys.exit(1) - - -class BasicSanityCheck(ScenarioComponent): - ( -''' -''') - - def IsApplicable(self): - return self.Env["DoBSC"] - - def SetUp(self, CM): - - CM.prepare() - - # Clear out the cobwebs - self.TearDown(CM) - - # Now start the Cluster Manager on all the nodes. - CM.log("Starting Cluster Manager on BSC node(s).") - return CM.startall() - - def TearDown(self, CM): - CM.log("Stopping Cluster Manager on BSC node(s).") - return CM.stopall() - - -class Benchmark(ScenarioComponent): - ( -''' -''') - - def IsApplicable(self): - return self.Env["benchmark"] - - def SetUp(self, CM): - - CM.prepare() - - # Clear out the cobwebs - self.TearDown(CM, force=True) - - # Now start the Cluster Manager on all the nodes. - CM.log("Starting Cluster Manager on all node(s).") - return CM.startall() - - def TearDown(self, CM): - CM.log("Stopping Cluster Manager on all node(s).") - return CM.stopall() - - -class RollingUpgrade(ScenarioComponent): - ( -''' -Test a rolling upgrade between two versions of the stack -''') - - def __init__(self, Env): - self.Env = Env - - def IsApplicable(self): - if not self.Env["rpm-dir"]: - return None - if not self.Env["current-version"]: - return None - if not self.Env["previous-version"]: - return None - - return True - - def install(self, node, version): - - target_dir = "/tmp/rpm-%s" % version - src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version) - - self.CM.rsh(node, "mkdir -p %s" % target_dir) - rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir)) - self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir)) - - return self.success() - - def upgrade(self, node): - return self.install(node, self.CM.Env["current-version"]) - - def downgrade(self, node): - return self.install(node, self.CM.Env["previous-version"]) - - def SetUp(self, CM): - print(repr(self)+"prepare") - CM.prepare() - - # Clear out the cobwebs - CM.stopall(force=True) - - CM.log("Downgrading all nodes to %s." % self.Env["previous-version"]) - - for node in self.Env["nodes"]: - if not self.downgrade(node): - CM.log("Couldn't downgrade %s" % node) - return None - - return 1 - - def TearDown(self, CM): - # Stop everything - CM.log("Stopping Cluster Manager on Upgrade nodes.") - CM.stopall() - - CM.log("Upgrading all nodes to %s." 
% self.Env["current-version"]) - for node in self.Env["nodes"]: - if not self.upgrade(node): - CM.log("Couldn't upgrade %s" % node) - return None - - return 1 diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py deleted file mode 100644 index 61766ce..0000000 --- a/cts/lab/CTStests.py +++ /dev/null @@ -1,3178 +0,0 @@ -""" Test-specific classes for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -# -# SPECIAL NOTE: -# -# Tests may NOT implement any cluster-manager-specific code in them. -# EXTEND the ClusterManager object to provide the base capabilities -# the test needs if you need to do something that the current CM classes -# do not. Otherwise you screw up the whole point of the object structure -# in CTS. -# -# Thank you. -# - -import os -import re -import time -import subprocess -import tempfile - -from stat import * -from cts.CTSaudits import * - -from pacemaker import BuildOptions -from pacemaker._cts.CTS import NodeStatus -from pacemaker._cts.environment import EnvFactory -from pacemaker._cts.logging import LogFactory -from pacemaker._cts.patterns import PatternSelector -from pacemaker._cts.remote import RemoteFactory -from pacemaker._cts.watcher import LogWatcher - -AllTestClasses = [ ] - - -class CTSTest(object): - ''' - A Cluster test. - We implement the basic set of properties and behaviors for a generic - cluster test. - - Cluster tests track their own statistics. - We keep each of the kinds of counts we track as separate {name,value} - pairs. - ''' - - def __init__(self, cm): - #self.name="the unnamed test" - self.Stats = {"calls":0 - , "success":0 - , "failure":0 - , "skipped":0 - , "auditfail":0} - -# if not issubclass(cm.__class__, ClusterManager): -# raise ValueError("Must be a ClusterManager object") - self.CM = cm - self.Env = EnvFactory().getInstance() - self.rsh = RemoteFactory().getInstance() - self.logger = LogFactory() - self.templates = PatternSelector(cm["Name"]) - self.Audits = [] - self.timeout = 120 - self.passed = 1 - self.is_loop = 0 - self.is_unsafe = 0 - self.is_experimental = 0 - self.is_container = 0 - self.is_valgrind = 0 - self.benchmark = 0 # which tests to benchmark - self.timer = {} # timers - - def log(self, args): - self.logger.log(args) - - def debug(self, args): - self.logger.debug(args) - - def has_key(self, key): - return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): - if str(key) == "0": - raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") - - if key in self.Stats: - return self.Stats[key] - return None - - def log_mark(self, msg): - self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) - return - - def get_timer(self,key = "test"): - try: return self.timer[key] - except: return 0 - - def set_timer(self,key = "test"): - self.timer[key] = time.time() - return self.timer[key] - - def log_timer(self,key = "test"): - elapsed = 0 - if key in self.timer: - elapsed = time.time() - self.timer[key] - s = key == "test" and self.name or "%s:%s" % (self.name,key) - self.debug("%s runtime: %.2f" % (s, elapsed)) - del self.timer[key] - return elapsed - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - # Reset the test passed boolean - if 
name == "calls": - self.passed = 1 - - def failure(self, reason="none"): - '''Increment the failure count''' - self.passed = 0 - self.incr("failure") - self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) - return None - - def success(self): - '''Increment the success count''' - self.incr("success") - return 1 - - def skipped(self): - '''Increment the skipped count''' - self.incr("skipped") - return 1 - - def __call__(self, node): - '''Perform the given test''' - raise ValueError("Abstract Class member (__call__)") - self.incr("calls") - return self.failure() - - def audit(self): - passed = 1 - if len(self.Audits) > 0: - for audit in self.Audits: - if not audit(): - self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name())) - self.incr("auditfail") - passed = 0 - return passed - - def setup(self, node): - '''Setup the given test''' - return self.success() - - def teardown(self, node): - '''Tear down the given test''' - return self.success() - - def create_watch(self, patterns, timeout, name=None): - if not name: - name = self.name - return LogWatcher(self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], name, timeout) - - def local_badnews(self, prefix, watch, local_ignore=[]): - errcount = 0 - if not prefix: - prefix = "LocalBadNews:" - - ignorelist = [] - ignorelist.append(" CTS: ") - ignorelist.append(prefix) - ignorelist.extend(local_ignore) - - while errcount < 100: - match = watch.look(0) - if match: - add_err = 1 - for ignore in ignorelist: - if add_err == 1 and re.search(ignore, match): - add_err = 0 - if add_err == 1: - self.logger.log(prefix + " " + match) - errcount = errcount + 1 - else: - break - else: - self.logger.log("Too many errors!") - - watch.end() - return errcount - - def is_applicable(self): - return self.is_applicable_common() - - def is_applicable_common(self): - '''Return True if we are applicable in the current test configuration''' - #raise ValueError("Abstract Class member (is_applicable)") - - if self.is_loop and not self.Env["loop-tests"]: - return False - elif self.is_unsafe and not self.Env["unsafe-tests"]: - return False - elif self.is_valgrind and not self.Env["valgrind-tests"]: - return False - elif self.is_experimental and not self.Env["experimental-tests"]: - return False - elif self.is_container and not self.Env["container-tests"]: - return False - elif self.Env["benchmark"] and self.benchmark == 0: - return False - - return True - - def find_ocfs2_resources(self, node): - self.r_o2cb = None - self.r_ocfs2 = [] - - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - r = AuditResource(self.CM, line) - if r.rtype == "o2cb" and r.parent != "NA": - self.debug("Found o2cb: %s" % self.r_o2cb) - self.r_o2cb = r.parent - if re.search("^Constraint", line): - c = AuditConstraint(self.CM, line) - if c.type == "rsc_colocation" and c.target == self.r_o2cb: - self.r_ocfs2.append(c.rsc) - - self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) - return len(self.r_ocfs2) - - def canrunnow(self, node): - '''Return TRUE if we can meaningfully run right now''' - return 1 - - def errorstoignore(self): - '''Return list of errors which are 'normal' and should be ignored''' - return [] - - -class StopTest(CTSTest): - '''Stop (deactivate) the cluster manager on a node''' - def __init__(self, cm): - CTSTest.__init__(self, cm) - self.name = "Stop" - - def __call__(self, node): - '''Perform the 'stop' test. 
''' - self.incr("calls") - if self.CM.ShouldBeStatus[node] != "up": - return self.skipped() - - patterns = [] - # Technically we should always be able to notice ourselves stopping - patterns.append(self.templates["Pat:We_stopped"] % node) - - # Any active node needs to notice this one left - # (note that this won't work if we have multiple partitions) - for other in self.Env["nodes"]: - if self.CM.ShouldBeStatus[other] == "up" and other != node: - patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) - #self.debug("Checking %s will notice %s left"%(other, node)) - - watch = self.create_watch(patterns, self.Env["DeadTime"]) - watch.set_watch() - - if node == self.CM.OurNode: - self.incr("us") - else: - if self.CM.upcount() <= 1: - self.incr("all") - else: - self.incr("them") - - self.CM.StopaCM(node) - watch_result = watch.look_for_all() - - failreason = None - UnmatchedList = "||" - if watch.unmatched: - (_, output) = self.rsh(node, "/bin/ps axf", verbose=1) - for line in output: - self.debug(line) - - (_, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1) - for line in output: - self.debug(line) - - for regex in watch.unmatched: - self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex)) - UnmatchedList += regex + "||"; - failreason = "Missing shutdown pattern" - - self.CM.cluster_stable(self.Env["DeadTime"]) - - if not watch.unmatched or self.CM.upcount() == 0: - return self.success() - - if len(watch.unmatched) >= self.CM.upcount(): - return self.failure("no match against (%s)" % UnmatchedList) - - if failreason == None: - return self.success() - else: - return self.failure(failreason) -# -# We don't register StopTest because it's better when called by -# another test... -# - - -class StartTest(CTSTest): - '''Start (activate) the cluster manager on a node''' - def __init__(self, cm, debug=None): - CTSTest.__init__(self,cm) - self.name = "start" - self.debug = debug - - def __call__(self, node): - '''Perform the 'start' test. ''' - self.incr("calls") - - if self.CM.upcount() == 0: - self.incr("us") - else: - self.incr("them") - - if self.CM.ShouldBeStatus[node] != "down": - return self.skipped() - elif self.CM.StartaCM(node): - return self.success() - else: - return self.failure("Startup %s on node %s failed" - % (self.Env["Name"], node)) - -# -# We don't register StartTest because it's better when called by -# another test... -# - - -class FlipTest(CTSTest): - '''If it's running, stop it. If it's stopped start it. - Overthrow the status quo... - ''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "Flip" - self.start = StartTest(cm) - self.stop = StopTest(cm) - - def __call__(self, node): - '''Perform the 'Flip' test. ''' - self.incr("calls") - if self.CM.ShouldBeStatus[node] == "up": - self.incr("stopped") - ret = self.stop(node) - type = "up->down" - # Give the cluster time to recognize it's gone... 
- time.sleep(self.Env["StableTime"]) - elif self.CM.ShouldBeStatus[node] == "down": - self.incr("started") - ret = self.start(node) - type = "down->up" - else: - return self.skipped() - - self.incr(type) - if ret: - return self.success() - else: - return self.failure("%s failure" % type) - -# Register FlipTest as a good test to run -AllTestClasses.append(FlipTest) - - -class RestartTest(CTSTest): - '''Stop and restart a node''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "Restart" - self.start = StartTest(cm) - self.stop = StopTest(cm) - self.benchmark = 1 - - def __call__(self, node): - '''Perform the 'restart' test. ''' - self.incr("calls") - - self.incr("node:" + node) - - ret1 = 1 - if self.CM.StataCM(node): - self.incr("WasStopped") - if not self.start(node): - return self.failure("start (setup) failure: "+node) - - self.set_timer() - if not self.stop(node): - return self.failure("stop failure: "+node) - if not self.start(node): - return self.failure("start failure: "+node) - return self.success() - -# Register RestartTest as a good test to run -AllTestClasses.append(RestartTest) - - -class StonithdTest(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self, cm) - self.name = "Stonithd" - self.startall = SimulStartLite(cm) - self.benchmark = 1 - - def __call__(self, node): - self.incr("calls") - if len(self.Env["nodes"]) < 2: - return self.skipped() - - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - is_dc = self.CM.is_node_dc(node) - - watchpats = [] - watchpats.append(self.templates["Pat:Fencing_ok"] % node) - watchpats.append(self.templates["Pat:NodeFenced"] % node) - - if not self.Env["at-boot"]: - self.debug("Expecting %s to stay down" % node) - self.CM.ShouldBeStatus[node] = "down" - else: - self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) - watchpats.append("%s.* S_STARTING -> S_PENDING" % node) - watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) - - watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) - watch.set_watch() - - origin = self.Env.random_gen.choice(self.Env["nodes"]) - - (rc, _) = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) - - if rc == 124: # CRM_EX_TIMEOUT - # Look for the patterns, usually this means the required - # device was running on the node to be fenced - or that - # the required devices were in the process of being loaded - # and/or moved - # - # Effectively the node committed suicide so there will be - # no confirmation, but pacemaker should be watching and - # fence the node again - - self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) - - elif origin != node and rc != 0: - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - self.debug("Waiting for fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) - - self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) - - elif origin == node and rc != 255: - # 255 == broken pipe, ie. 
the node was fenced as expected - self.logger.log("Locally originated fencing returned %d" % rc) - - self.set_timer("fence") - matched = watch.look_for_all() - self.log_timer("fence") - self.set_timer("reform") - if watch.unmatched: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - self.debug("Waiting for fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) - - self.debug("Waiting for the cluster to re-stabilize with all nodes") - is_stable = self.CM.cluster_stable(self.Env["StartTime"]) - - if not matched: - return self.failure("Didn't find all expected patterns") - elif not is_stable: - return self.failure("Cluster did not become stable") - - self.log_timer("reform") - return self.success() - - def errorstoignore(self): - return [ - self.templates["Pat:Fencing_start"] % ".*", - self.templates["Pat:Fencing_ok"] % ".*", - self.templates["Pat:Fencing_active"], - r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired", - ] - - def is_applicable(self): - if not self.is_applicable_common(): - return False - - if "DoFencing" in list(self.Env.keys()): - return self.Env["DoFencing"] - - return True - -AllTestClasses.append(StonithdTest) - - -class StartOnebyOne(CTSTest): - '''Start all the nodes ~ one by one''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "StartOnebyOne" - self.stopall = SimulStopLite(cm) - self.start = StartTest(cm) - self.ns = NodeStatus(cm.Env) - - def __call__(self, dummy): - '''Perform the 'StartOnebyOne' test. ''' - self.incr("calls") - - # We ignore the "node" parameter... - - # Shut down all the nodes... - ret = self.stopall(None) - if not ret: - return self.failure("Test setup failed") - - failed = [] - self.set_timer() - for node in self.Env["nodes"]: - if not self.start(node): - failed.append(node) - - if len(failed) > 0: - return self.failure("Some node failed to start: " + repr(failed)) - - return self.success() - -# Register StartOnebyOne as a good test to run -AllTestClasses.append(StartOnebyOne) - - -class SimulStart(CTSTest): - '''Start all the nodes ~ simultaneously''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStart" - self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - - def __call__(self, dummy): - '''Perform the 'SimulStart' test. ''' - self.incr("calls") - - # We ignore the "node" parameter... - - # Shut down all the nodes... - ret = self.stopall(None) - if not ret: - return self.failure("Setup failed") - - if not self.startall(None): - return self.failure("Startall failed") - - return self.success() - -# Register SimulStart as a good test to run -AllTestClasses.append(SimulStart) - - -class SimulStop(CTSTest): - '''Stop all the nodes ~ simultaneously''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStop" - self.startall = SimulStartLite(cm) - self.stopall = SimulStopLite(cm) - - def __call__(self, dummy): - '''Perform the 'SimulStop' test. ''' - self.incr("calls") - - # We ignore the "node" parameter... - - # Start up all the nodes... 
- ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - if not self.stopall(None): - return self.failure("Stopall failed") - - return self.success() - -# Register SimulStop as a good test to run -AllTestClasses.append(SimulStop) - - -class StopOnebyOne(CTSTest): - '''Stop all the nodes in order''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "StopOnebyOne" - self.startall = SimulStartLite(cm) - self.stop = StopTest(cm) - - def __call__(self, dummy): - '''Perform the 'StopOnebyOne' test. ''' - self.incr("calls") - - # We ignore the "node" parameter... - - # Start up all the nodes... - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - failed = [] - self.set_timer() - for node in self.Env["nodes"]: - if not self.stop(node): - failed.append(node) - - if len(failed) > 0: - return self.failure("Some node failed to stop: " + repr(failed)) - - return self.success() - -# Register StopOnebyOne as a good test to run -AllTestClasses.append(StopOnebyOne) - - -class RestartOnebyOne(CTSTest): - '''Restart all the nodes in order''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "RestartOnebyOne" - self.startall = SimulStartLite(cm) - - def __call__(self, dummy): - '''Perform the 'RestartOnebyOne' test. ''' - self.incr("calls") - - # We ignore the "node" parameter... - - # Start up all the nodes... - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - did_fail = [] - self.set_timer() - self.restart = RestartTest(self.CM) - for node in self.Env["nodes"]: - if not self.restart(node): - did_fail.append(node) - - if did_fail: - return self.failure("Could not restart %d nodes: %s" - % (len(did_fail), repr(did_fail))) - return self.success() - -# Register StopOnebyOne as a good test to run -AllTestClasses.append(RestartOnebyOne) - - -class PartialStart(CTSTest): - '''Start a node - but tell it to stop before it finishes starting up''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "PartialStart" - self.startall = SimulStartLite(cm) - self.stopall = SimulStopLite(cm) - self.stop = StopTest(cm) - #self.is_unsafe = 1 - - def __call__(self, node): - '''Perform the 'PartialStart' test. 
''' - self.incr("calls") - - ret = self.stopall(None) - if not ret: - return self.failure("Setup failed") - - watchpats = [] - watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure") - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - watch.set_watch() - - self.CM.StartaCMnoBlock(node) - ret = watch.look_for_all() - if not ret: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) - return self.failure("Setup of %s failed" % node) - - ret = self.stop(node) - if not ret: - return self.failure("%s did not stop in time" % node) - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - - # We might do some fencing in the 2-node case if we make it up far enough - return [ - r"Executing reboot fencing operation", - r"Requesting fencing \([^)]+\) targeting node ", - ] - -# Register StopOnebyOne as a good test to run -AllTestClasses.append(PartialStart) - - -class StandbyTest(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "Standby" - self.benchmark = 1 - - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - - # make sure the node is active - # set the node to standby mode - # check resources, none resource should be running on the node - # set the node to active mode - # check resouces, resources should have been migrated back (SHOULD THEY?) - - def __call__(self, node): - - self.incr("calls") - ret = self.startall(None) - if not ret: - return self.failure("Start all nodes failed") - - self.debug("Make sure node %s is active" % node) - if self.CM.StandbyStatus(node) != "off": - if not self.CM.SetStandbyMode(node, "off"): - return self.failure("can't set node %s to active mode" % node) - - self.CM.cluster_stable() - - status = self.CM.StandbyStatus(node) - if status != "off": - return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) - - self.debug("Getting resources running on node %s" % node) - rsc_on_node = self.CM.active_resources(node) - - watchpats = [] - watchpats.append(r"State transition .* -> S_POLICY_ENGINE") - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - watch.set_watch() - - self.debug("Setting node %s to standby mode" % node) - if not self.CM.SetStandbyMode(node, "on"): - return self.failure("can't set node %s to standby mode" % node) - - self.set_timer("on") - - ret = watch.look_for_all() - if not ret: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) - self.CM.SetStandbyMode(node, "off") - return self.failure("cluster didn't react to standby change on %s" % node) - - self.CM.cluster_stable() - - status = self.CM.StandbyStatus(node) - if status != "on": - return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) - self.log_timer("on") - - self.debug("Checking resources") - bad_run = self.CM.active_resources(node) - if len(bad_run) > 0: - rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) - self.debug("Setting node %s to active mode" % node) - self.CM.SetStandbyMode(node, "off") - return rc - - self.debug("Setting node %s to active mode" % node) - if not self.CM.SetStandbyMode(node, "off"): - return self.failure("can't set node %s to active mode" % node) - - self.set_timer("off") - self.CM.cluster_stable() - - status = self.CM.StandbyStatus(node) - if status != "off": - return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) - self.log_timer("off") - - return 
self.success() - -AllTestClasses.append(StandbyTest) - - -class ValgrindTest(CTSTest): - '''Check for memory leaks''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "Valgrind" - self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - self.is_valgrind = 1 - self.is_loop = 1 - - def setup(self, node): - self.incr("calls") - - ret = self.stopall(None) - if not ret: - return self.failure("Stop all nodes failed") - - # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind, - # and clear any valgrind logs from previous runs. For now, we rely on - # the user to do this manually. - - ret = self.startall(None) - if not ret: - return self.failure("Start all nodes failed") - - return self.success() - - def teardown(self, node): - # Return all nodes to normal - # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind - ret = self.stopall(None) - if not ret: - return self.failure("Stop all nodes failed") - - return self.success() - - def find_leaks(self): - # Check for leaks - # (no longer used but kept in case feature is restored) - leaked = [] - self.stop = StopTest(self.CM) - - for node in self.Env["nodes"]: - rc = self.stop(node) - if not rc: - self.failure("Couldn't shut down %s" % node) - - (rc, _) = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat) - if rc != 1: - leaked.append(node) - self.failure("Valgrind errors detected on %s" % node) - (_, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, verbose=1) - for line in output: - self.logger.log(line) - (_, output) = self.rsh(node, "cat %s" % self.logger.logPat, verbose=1) - for line in output: - self.debug(line) - - self.rsh(node, "rm -f %s" % self.logger.logPat, verbose=1) - return leaked - - def __call__(self, node): - #leaked = self.find_leaks() - #if len(leaked) > 0: - # return self.failure("Nodes %s leaked" % repr(leaked)) - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", - r"pacemaker-based.*: .* avoid confusing Valgrind", - r"HA_VALGRIND_ENABLED", - ] - - -class StandbyLoopTest(ValgrindTest): - '''Check for memory leaks by putting a node in and out of standby for an hour''' - # @TODO This is not a useful test for memory leaks - def __init__(self, cm): - ValgrindTest.__init__(self,cm) - self.name = "StandbyLoop" - - def __call__(self, node): - - lpc = 0 - delay = 2 - failed = 0 - done = time.time() + self.Env["loop-minutes"] * 60 - while time.time() <= done and not failed: - lpc = lpc + 1 - - time.sleep(delay) - if not self.CM.SetStandbyMode(node, "on"): - self.failure("can't set node %s to standby mode" % node) - failed = lpc - - time.sleep(delay) - if not self.CM.SetStandbyMode(node, "off"): - self.failure("can't set node %s to active mode" % node) - failed = lpc - - leaked = self.find_leaks() - if failed: - return self.failure("Iteration %d failed" % failed) - elif len(leaked) > 0: - return self.failure("Nodes %s leaked" % repr(leaked)) - - return self.success() - -#AllTestClasses.append(StandbyLoopTest) - - -class BandwidthTest(CTSTest): -# Tests should not be cluster-manager-specific -# If you need to find out cluster manager configuration to do this, then -# it should be added to the generic cluster manager API. 
- '''Test the bandwidth which the cluster uses''' - def __init__(self, cm): - CTSTest.__init__(self, cm) - self.name = "Bandwidth" - self.start = StartTest(cm) - self.__setitem__("min",0) - self.__setitem__("max",0) - self.__setitem__("totalbandwidth",0) - (handle, self.tempfile) = tempfile.mkstemp(".cts") - os.close(handle) - self.startall = SimulStartLite(cm) - - def __call__(self, node): - '''Perform the Bandwidth test''' - self.incr("calls") - - if self.CM.upcount() < 1: - return self.skipped() - - Path = self.CM.InternalCommConfig() - if "ip" not in Path["mediatype"]: - return self.skipped() - - port = Path["port"][0] - port = int(port) - - ret = self.startall(None) - if not ret: - return self.failure("Test setup failed") - time.sleep(5) # We get extra messages right after startup. - - fstmpfile = "/var/run/band_estimate" - dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ - % (port, fstmpfile) - - (rc, _) = self.rsh(node, dumpcmd) - if rc == 0: - farfile = "root@%s:%s" % (node, fstmpfile) - self.rsh.copy(farfile, self.tempfile) - Bandwidth = self.countbandwidth(self.tempfile) - if not Bandwidth: - self.logger.log("Could not compute bandwidth.") - return self.success() - intband = int(Bandwidth + 0.5) - self.logger.log("...bandwidth: %d bits/sec" % intband) - self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth - if self.Stats["min"] == 0: - self.Stats["min"] = Bandwidth - if Bandwidth > self.Stats["max"]: - self.Stats["max"] = Bandwidth - if Bandwidth < self.Stats["min"]: - self.Stats["min"] = Bandwidth - self.rsh(node, "rm -f %s" % fstmpfile) - os.unlink(self.tempfile) - return self.success() - else: - return self.failure("no response from tcpdump command [%d]!" % rc) - - def countbandwidth(self, file): - fp = open(file, "r") - fp.seek(0) - count = 0 - sum = 0 - while 1: - line = fp.readline() - if not line: - return None - if re.search("udp",line) or re.search("UDP,", line): - count = count + 1 - linesplit = line.split(" ") - for j in range(len(linesplit)-1): - if linesplit[j] == "udp": break - if linesplit[j] == "length:": break - - try: - sum = sum + int(linesplit[j+1]) - except ValueError: - self.logger.log("Invalid tcpdump line: %s" % line) - return None - T1 = linesplit[0] - timesplit = T1.split(":") - time2split = timesplit[2].split(".") - time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 - break - - while count < 100: - line = fp.readline() - if not line: - return None - if re.search("udp",line) or re.search("UDP,", line): - count = count+1 - linessplit = line.split(" ") - for j in range(len(linessplit)-1): - if linessplit[j] == "udp": break - if linessplit[j] == "length:": break - try: - sum = int(linessplit[j+1]) + sum - except ValueError: - self.logger.log("Invalid tcpdump line: %s" % line) - return None - - T2 = linessplit[0] - timesplit = T2.split(":") - time2split = timesplit[2].split(".") - time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 - time = time2-time1 - if (time <= 0): - return 0 - return int((sum*8)/time) - - def is_applicable(self): - '''BandwidthTest never applicable''' - return False - -AllTestClasses.append(BandwidthTest) - - -################################################################### -class MaintenanceMode(CTSTest): -################################################################### - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "MaintenanceMode" - self.start = StartTest(cm) - self.startall = 
SimulStartLite(cm) - self.max = 30 - #self.is_unsafe = 1 - self.benchmark = 1 - self.action = "asyncmon" - self.interval = 0 - self.rid = "maintenanceDummy" - - def toggleMaintenanceMode(self, node, action): - pats = [] - pats.append(self.templates["Pat:DC_IDLE"]) - - # fail the resource right after turning Maintenance mode on - # verify it is not recovered until maintenance mode is turned off - if action == "On": - pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid)) - else: - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) - - watch = self.create_watch(pats, 60) - watch.set_watch() - - self.debug("Turning maintenance mode %s" % action) - self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) - if (action == "On"): - self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) - - self.set_timer("recover%s" % (action)) - watch.look_for_all() - self.log_timer("recover%s" % (action)) - if watch.unmatched: - self.debug("Failed to find patterns when turning maintenance mode %s" % action) - return repr(watch.unmatched) - - return "" - - def insertMaintenanceDummy(self, node): - pats = [] - pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) - - watch = self.create_watch(pats, 60) - watch.set_watch() - - self.CM.AddDummyRsc(node, self.rid) - - self.set_timer("addDummy") - watch.look_for_all() - self.log_timer("addDummy") - - if watch.unmatched: - self.debug("Failed to find patterns when adding maintenance dummy resource") - return repr(watch.unmatched) - return "" - - def removeMaintenanceDummy(self, node): - pats = [] - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) - - watch = self.create_watch(pats, 60) - watch.set_watch() - self.CM.RemoveDummyRsc(node, self.rid) - - self.set_timer("removeDummy") - watch.look_for_all() - self.log_timer("removeDummy") - - if watch.unmatched: - self.debug("Failed to find patterns when removing maintenance dummy resource") - return repr(watch.unmatched) - return "" - - def managedRscList(self, node): - rscList = [] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - tmp = AuditResource(self.CM, line) - if tmp.managed(): - rscList.append(tmp.id) - - return rscList - - def verifyResources(self, node, rscList, managed): - managedList = list(rscList) - managed_str = "managed" - if not managed: - managed_str = "unmanaged" - - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - tmp = AuditResource(self.CM, line) - if managed and not tmp.managed(): - continue - elif not managed and tmp.managed(): - continue - elif managedList.count(tmp.id): - managedList.remove(tmp.id) - - if len(managedList) == 0: - self.debug("Found all %s resources on %s" % (managed_str, node)) - return True - - self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) - return False - - def __call__(self, node): - '''Perform the 'MaintenanceMode' test. ''' - self.incr("calls") - verify_managed = False - verify_unmanaged = False - failPat = "" - - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - # get a list of all the managed resources. We use this list - # after enabling maintenance mode to verify all managed resources - # become un-managed. 
After maintenance mode is turned off, we use - # this list to verify all the resources become managed again. - managedResources = self.managedRscList(node) - if len(managedResources) == 0: - self.logger.log("No managed resources on %s" % node) - return self.skipped() - - # insert a fake resource we can fail during maintenance mode - # so we can verify recovery does not take place until after maintenance - # mode is disabled. - failPat = failPat + self.insertMaintenanceDummy(node) - - # toggle maintenance mode ON, then fail dummy resource. - failPat = failPat + self.toggleMaintenanceMode(node, "On") - - # verify all the resources are now unmanaged - if self.verifyResources(node, managedResources, False): - verify_unmanaged = True - - # Toggle maintenance mode OFF, verify dummy is recovered. - failPat = failPat + self.toggleMaintenanceMode(node, "Off") - - # verify all the resources are now managed again - if self.verifyResources(node, managedResources, True): - verify_managed = True - - # Remove our maintenance dummy resource. - failPat = failPat + self.removeMaintenanceDummy(node) - - self.CM.cluster_stable() - - if failPat != "": - return self.failure("Unmatched patterns: %s" % (failPat)) - elif verify_unmanaged is False: - return self.failure("Failed to verify resources became unmanaged during maintenance mode") - elif verify_managed is False: - return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for %s" % self.rid, - r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid, - r"Unknown operation: fail", - self.templates["Pat:RscOpOK"] % (self.action, self.rid), - r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), - ] - -AllTestClasses.append(MaintenanceMode) - - -class ResourceRecover(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "ResourceRecover" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.max = 30 - self.rid = None - self.rid_alt = None - #self.is_unsafe = 1 - self.benchmark = 1 - - # these are the values used for the new LRM API call - self.action = "asyncmon" - self.interval = 0 - - def __call__(self, node): - '''Perform the 'ResourceRecover' test. 
''' - self.incr("calls") - - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - # List all resources active on the node (skip test if none) - resourcelist = self.CM.active_resources(node) - if len(resourcelist) == 0: - self.logger.log("No active resources on %s" % node) - return self.skipped() - - # Choose one resource at random - rsc = self.choose_resource(node, resourcelist) - if rsc is None: - return self.failure("Could not get details of resource '%s'" % self.rid) - if rsc.id == rsc.clone_id: - self.debug("Failing " + rsc.id) - else: - self.debug("Failing " + rsc.id + " (also known as " + rsc.clone_id + ")") - - # Log patterns to watch for (failure, plus restart if managed) - pats = [] - pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id)) - if rsc.managed(): - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) - if rsc.unique(): - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) - else: - # Anonymous clones may get restarted with a different clone number - pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) - - # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count - # is incrementing properly, but it might restart on a different node. - # We'd have to temporarily ban it from all other nodes and ensure the - # migration-threshold hasn't been reached.) - if self.fail_resource(rsc, node, pats) is None: - return None # self.failure() already called - - return self.success() - - def choose_resource(self, node, resourcelist): - """ Choose a random resource to target """ - - self.rid = self.Env.random_gen.choice(resourcelist) - self.rid_alt = self.rid - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if line.startswith("Resource: "): - rsc = AuditResource(self.CM, line) - if rsc.id == self.rid: - # Handle anonymous clones that get renamed - self.rid = rsc.clone_id - return rsc - return None - - def get_failcount(self, node): - """ Check the fail count of targeted resource on given node """ - - (rc, lines) = self.rsh(node, - "crm_failcount --quiet --query --resource %s " - "--operation %s --interval %d " - "--node %s" % (self.rid, self.action, - self.interval, node), verbose=1) - if rc != 0 or len(lines) != 1: - self.logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, - " // ".join(map(str.strip, lines)))) - return -1 - try: - failcount = int(lines[0]) - except (IndexError, ValueError): - self.logger.log("crm_failcount output on %s unparseable: %s" % (node, - ' '.join(lines))) - return -1 - return failcount - - def fail_resource(self, rsc, node, pats): - """ Fail the targeted resource, and verify as expected """ - - orig_failcount = self.get_failcount(node) - - watch = self.create_watch(pats, 60) - watch.set_watch() - - self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) - - self.set_timer("recover") - watch.look_for_all() - self.log_timer("recover") - - self.CM.cluster_stable() - recovered = self.CM.ResourceLocation(self.rid) - - if watch.unmatched: - return self.failure("Patterns not found: %s" % repr(watch.unmatched)) - - elif rsc.unique() and len(recovered) > 1: - return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) - - elif len(recovered) > 0: - self.debug("%s is running on: %s" % (self.rid, repr(recovered))) - - elif rsc.managed(): - return self.failure("%s was not recovered and is inactive" % self.rid) - - new_failcount = self.get_failcount(node) - if 
new_failcount != (orig_failcount + 1): - return self.failure("%s fail count is %d not %d" % (self.rid, - new_failcount, orig_failcount + 1)) - - return 0 # Anything but None is success - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for %s" % self.rid, - r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt), - r"Unknown operation: fail", - self.templates["Pat:RscOpOK"] % (self.action, self.rid), - r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), - ] - -AllTestClasses.append(ResourceRecover) - - -class ComponentFail(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "ComponentFail" - self.startall = SimulStartLite(cm) - self.complist = cm.Components() - self.patterns = [] - self.okerrpatterns = [] - self.is_unsafe = 1 - - def __call__(self, node): - '''Perform the 'ComponentFail' test. ''' - self.incr("calls") - self.patterns = [] - self.okerrpatterns = [] - - # start all nodes - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - if not self.CM.cluster_stable(self.Env["StableTime"]): - return self.failure("Setup failed - unstable") - - node_is_dc = self.CM.is_node_dc(node, None) - - # select a component to kill - chosen = self.Env.random_gen.choice(self.complist) - while chosen.dc_only and node_is_dc == 0: - chosen = self.Env.random_gen.choice(self.complist) - - self.debug("...component %s (dc=%d)" % (chosen.name, node_is_dc)) - self.incr(chosen.name) - - if chosen.name != "corosync": - self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) - self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) - - self.patterns.extend(chosen.pats) - if node_is_dc: - self.patterns.extend(chosen.dc_pats) - - # @TODO this should be a flag in the Component - if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]: - # Ignore actions for fence devices if fencer will respawn - # (their registration will be lost, and probes will fail) - self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - r = AuditResource(self.CM, line) - if r.rclass == "stonith": - self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) - self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id) - - # supply a copy so self.patterns doesn't end up empty - tmpPats = [] - tmpPats.extend(self.patterns) - self.patterns.extend(chosen.badnews_ignore) - - # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status - stonithPats = [] - stonithPats.append(self.templates["Pat:Fencing_ok"] % node) - stonith = self.create_watch(stonithPats, 0) - stonith.set_watch() - - # set the watch for stable - watch = self.create_watch( - tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) - watch.set_watch() - - # kill the component - chosen.kill(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - self.debug("Waiting for any fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) - - self.debug("Waiting for the cluster to re-stabilize with all nodes") - self.CM.cluster_stable(self.Env["StartTime"]) - - self.debug("Checking if %s was shot" % node) - shot = stonith.look(60) - if shot: - self.debug("Found: " + repr(shot)) - 
self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) - - if not self.Env["at-boot"]: - self.CM.ShouldBeStatus[node] = "down" - - # If fencing occurred, chances are many (if not all) the expected logs - # will not be sent - or will be lost when the node reboots - return self.success() - - # check for logs indicating a graceful recovery - matched = watch.look_for_all(allow_multiple_matches=True) - if watch.unmatched: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) - - self.debug("Waiting for the cluster to re-stabilize with all nodes") - is_stable = self.CM.cluster_stable(self.Env["StartTime"]) - - if not matched: - return self.failure("Didn't find all expected %s patterns" % chosen.name) - elif not is_stable: - return self.failure("Cluster did not become stable after killing %s" % chosen.name) - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - # Note that okerrpatterns refers to the last time we ran this test - # The good news is that this works fine for us... - self.okerrpatterns.extend(self.patterns) - return self.okerrpatterns - -AllTestClasses.append(ComponentFail) - - -class SplitBrainTest(CTSTest): - '''It is used to test split-brain. when the path between the two nodes break - check the two nodes both take over the resource''' - def __init__(self,cm): - CTSTest.__init__(self,cm) - self.name = "SplitBrain" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.is_experimental = 1 - - def isolate_partition(self, partition): - other_nodes = [] - other_nodes.extend(self.Env["nodes"]) - - for node in partition: - try: - other_nodes.remove(node) - except ValueError: - self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) - - if len(other_nodes) == 0: - return 1 - - self.debug("Creating partition: " + repr(partition)) - self.debug("Everyone else: " + repr(other_nodes)) - - for node in partition: - if not self.CM.isolate_node(node, other_nodes): - self.logger.log("Could not isolate %s" % node) - return 0 - - return 1 - - def heal_partition(self, partition): - other_nodes = [] - other_nodes.extend(self.Env["nodes"]) - - for node in partition: - try: - other_nodes.remove(node) - except ValueError: - self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) - - if len(other_nodes) == 0: - return 1 - - self.debug("Healing partition: " + repr(partition)) - self.debug("Everyone else: " + repr(other_nodes)) - - for node in partition: - self.CM.unisolate_node(node, other_nodes) - - def __call__(self, node): - '''Perform split-brain test''' - self.incr("calls") - self.passed = 1 - partitions = {} - - ret = self.startall(None) - if not ret: - return self.failure("Setup failed") - - while 1: - # Retry until we get multiple partitions - partitions = {} - p_max = len(self.Env["nodes"]) - for node in self.Env["nodes"]: - p = self.Env.random_gen.randint(1, p_max) - if not p in partitions: - partitions[p] = [] - partitions[p].append(node) - p_max = len(list(partitions.keys())) - if p_max > 1: - break - # else, try again - - self.debug("Created %d partitions" % p_max) - for key in list(partitions.keys()): - self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) - - # Disabling STONITH to reduce test complexity for now - self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") - - for key in list(partitions.keys()): - self.isolate_partition(partitions[key]) - - count = 30 - while count > 0: - if len(self.CM.find_partitions()) != 
p_max: - time.sleep(10) - else: - break - else: - self.failure("Expected partitions were not created") - - # Target number of partitions formed - wait for stability - if not self.CM.cluster_stable(): - self.failure("Partitioned cluster not stable") - - # Now audit the cluster state - self.CM.partitions_expected = p_max - if not self.audit(): - self.failure("Audits failed") - self.CM.partitions_expected = 1 - - # And heal them again - for key in list(partitions.keys()): - self.heal_partition(partitions[key]) - - # Wait for a single partition to form - count = 30 - while count > 0: - if len(self.CM.find_partitions()) != 1: - time.sleep(10) - count -= 1 - else: - break - else: - self.failure("Cluster did not reform") - - # Wait for it to have the right number of members - count = 30 - while count > 0: - members = [] - - partitions = self.CM.find_partitions() - if len(partitions) > 0: - members = partitions[0].split() - - if len(members) != len(self.Env["nodes"]): - time.sleep(10) - count -= 1 - else: - break - else: - self.failure("Cluster did not completely reform") - - # Wait up to 20 minutes - the delay is more preferable than - # trying to continue with in a messed up state - if not self.CM.cluster_stable(1200): - self.failure("Reformed cluster not stable") - if self.Env["continue"]: - answer = "Y" - else: - try: - answer = input('Continue? [nY]') - except EOFError as e: - answer = "n" - if answer and answer == "n": - raise ValueError("Reformed cluster not stable") - - # Turn fencing back on - if self.Env["DoFencing"]: - self.rsh(node, "crm_attribute -V -D -n stonith-enabled") - - self.CM.cluster_stable() - - if self.passed: - return self.success() - return self.failure("See previous errors") - - def errorstoignore(self): - '''Return list of errors which are 'normal' and should be ignored''' - return [ - r"Another DC detected:", - r"(ERROR|error).*: .*Application of an update diff failed", - r"pacemaker-controld.*:.*not in our membership list", - r"CRIT:.*node.*returning after partition", - ] - - def is_applicable(self): - if not self.is_applicable_common(): - return False - return len(self.Env["nodes"]) > 2 - -AllTestClasses.append(SplitBrainTest) - - -class Reattach(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "Reattach" - self.startall = SimulStartLite(cm) - self.restart1 = RestartTest(cm) - self.stopall = SimulStopLite(cm) - self.is_unsafe = 0 # Handled by canrunnow() - - def _is_managed(self, node): - (_, is_managed) = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1) - is_managed = is_managed[0].strip() - return is_managed == "true" - - def _set_unmanaged(self, node): - self.debug("Disable resource management") - self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") - - def _set_managed(self, node): - self.debug("Re-enable resource management") - self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") - - def setup(self, node): - attempt = 0 - if not self.startall(None): - return None - - # Make sure we are really _really_ stable and that all - # resources, including those that depend on transient node - # attributes, are started - while not self.CM.cluster_stable(double_check=True): - if attempt < 5: - attempt += 1 - self.debug("Not stable yet, re-testing") - else: - self.logger.log("Cluster is not stable") - return None - - return 1 - - def teardown(self, node): - - # Make sure 'node' is up - start = StartTest(self.CM) - start(node) - - if not self._is_managed(node): - 
self.logger.log("Attempting to re-enable resource management on %s" % node) - self._set_managed(node) - self.CM.cluster_stable() - if not self._is_managed(node): - self.logger.log("Could not re-enable resource management") - return 0 - - return 1 - - def canrunnow(self, node): - '''Return TRUE if we can meaningfully run right now''' - if self.find_ocfs2_resources(node): - self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") - return 0 - return 1 - - def __call__(self, node): - self.incr("calls") - - pats = [] - # Conveniently, the scheduler will display this message when disabling - # management, even if fencing is not enabled, so we can rely on it. - managed = self.create_watch(["No fencing will be done"], 60) - managed.set_watch() - - self._set_unmanaged(node) - - if not managed.look_for_all(): - self.logger.log("Patterns not found: " + repr(managed.unmatched)) - return self.failure("Resource management not disabled") - - pats = [] - pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) - pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*")) - pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*")) - pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*")) - pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*")) - - watch = self.create_watch(pats, 60, "ShutdownActivity") - watch.set_watch() - - self.debug("Shutting down the cluster") - ret = self.stopall(None) - if not ret: - self._set_managed(node) - return self.failure("Couldn't shut down the cluster") - - self.debug("Bringing the cluster back up") - ret = self.startall(None) - time.sleep(5) # allow ping to update the CIB - if not ret: - self._set_managed(node) - return self.failure("Couldn't restart the cluster") - - if self.local_badnews("ResourceActivity:", watch): - self._set_managed(node) - return self.failure("Resources stopped or started during cluster restart") - - watch = self.create_watch(pats, 60, "StartupActivity") - watch.set_watch() - - # Re-enable resource management (and verify it happened). - self._set_managed(node) - self.CM.cluster_stable() - if not self._is_managed(node): - return self.failure("Could not re-enable resource management") - - # Ignore actions for STONITH resources - ignore = [] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - r = AuditResource(self.CM, line) - if r.rclass == "stonith": - - self.debug("Ignoring start actions for %s" % r.id) - ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id)) - - if self.local_badnews("ResourceActivity:", watch, ignore): - return self.failure("Resources stopped or started after resource management was re-enabled") - - return ret - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"resource( was|s were) active at shutdown", - ] - - def is_applicable(self): - return True - -AllTestClasses.append(Reattach) - - -class SpecialTest1(CTSTest): - '''Set up a custom test to cause quorum failure issues for Andrew''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SpecialTest1" - self.startall = SimulStartLite(cm) - self.restart1 = RestartTest(cm) - self.stopall = SimulStopLite(cm) - - def __call__(self, node): - '''Perform the 'SpecialTest1' test for Andrew. ''' - self.incr("calls") - - # Shut down all the nodes... 
- ret = self.stopall(None) - if not ret: - return self.failure("Could not stop all nodes") - - # Test config recovery when the other nodes come up - self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*") - - # Start the selected node - ret = self.restart1(node) - if not ret: - return self.failure("Could not start "+node) - - # Start all remaining nodes - ret = self.startall(None) - if not ret: - return self.failure("Could not start the remaining nodes") - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - # Errors that occur as a result of the CIB being wiped - return [ - r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", - r"error.*: Resource start-up disabled since no STONITH resources have been defined", - r"error.*: Either configure some or disable STONITH with the stonith-enabled option", - r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", - ] - -AllTestClasses.append(SpecialTest1) - - -class HAETest(CTSTest): - '''Set up a custom test to cause quorum failure issues for Andrew''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "HAETest" - self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - self.is_loop = 1 - - def setup(self, node): - # Start all remaining nodes - ret = self.startall(None) - if not ret: - return self.failure("Couldn't start all nodes") - return self.success() - - def teardown(self, node): - # Stop everything - ret = self.stopall(None) - if not ret: - return self.failure("Couldn't stop all nodes") - return self.success() - - def wait_on_state(self, node, resource, expected_clones, attempts=240): - while attempts > 0: - active = 0 - (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, verbose=1) - - # Hack until crm_resource does the right thing - if rc == 0 and lines: - active = len(lines) - - if len(lines) == expected_clones: - return 1 - - elif rc == 1: - self.debug("Resource %s is still inactive" % resource) - - elif rc == 234: - self.logger.log("Unknown resource %s" % resource) - return 0 - - elif rc == 246: - self.logger.log("Cluster is inactive") - return 0 - - elif rc != 0: - self.logger.log("Call to crm_resource failed, rc=%d" % rc) - return 0 - - else: - self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) - - attempts -= 1 - time.sleep(1) - - return 0 - - def find_dlm(self, node): - self.r_dlm = None - - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - r = AuditResource(self.CM, line) - if r.rtype == "controld" and r.parent != "NA": - self.debug("Found dlm: %s" % self.r_dlm) - self.r_dlm = r.parent - return 1 - return 0 - - def find_hae_resources(self, node): - self.r_dlm = None - self.r_o2cb = None - self.r_ocfs2 = [] - - if self.find_dlm(node): - self.find_ocfs2_resources(node) - - def is_applicable(self): - if not self.is_applicable_common(): - return False - if self.Env["Schema"] == "hae": - return True - return None - - -class HAERoleTest(HAETest): - def __init__(self, cm): - '''Lars' mount/unmount test for the HA extension. 
''' - HAETest.__init__(self,cm) - self.name = "HAERoleTest" - - def change_state(self, node, resource, target): - (rc, _) = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) - return rc - - def __call__(self, node): - self.incr("calls") - lpc = 0 - failed = 0 - delay = 2 - done = time.time() + self.Env["loop-minutes"]*60 - self.find_hae_resources(node) - - clone_max = len(self.Env["nodes"]) - while time.time() <= done and not failed: - lpc = lpc + 1 - - self.change_state(node, self.r_dlm, "Stopped") - if not self.wait_on_state(node, self.r_dlm, 0): - self.failure("%s did not go down correctly" % self.r_dlm) - failed = lpc - - self.change_state(node, self.r_dlm, "Started") - if not self.wait_on_state(node, self.r_dlm, clone_max): - self.failure("%s did not come up correctly" % self.r_dlm) - failed = lpc - - if not self.wait_on_state(node, self.r_o2cb, clone_max): - self.failure("%s did not come up correctly" % self.r_o2cb) - failed = lpc - - for fs in self.r_ocfs2: - if not self.wait_on_state(node, fs, clone_max): - self.failure("%s did not come up correctly" % fs) - failed = lpc - - if failed: - return self.failure("iteration %d failed" % failed) - return self.success() - -AllTestClasses.append(HAERoleTest) - - -class HAEStandbyTest(HAETest): - '''Set up a custom test to cause quorum failure issues for Andrew''' - def __init__(self, cm): - HAETest.__init__(self,cm) - self.name = "HAEStandbyTest" - - def change_state(self, node, resource, target): - (rc, _) = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) - return rc - - def __call__(self, node): - self.incr("calls") - - lpc = 0 - failed = 0 - done = time.time() + self.Env["loop-minutes"]*60 - self.find_hae_resources(node) - - clone_max = len(self.Env["nodes"]) - while time.time() <= done and not failed: - lpc = lpc + 1 - - self.change_state(node, self.r_dlm, "true") - if not self.wait_on_state(node, self.r_dlm, clone_max-1): - self.failure("%s did not go down correctly" % self.r_dlm) - failed = lpc - - self.change_state(node, self.r_dlm, "false") - if not self.wait_on_state(node, self.r_dlm, clone_max): - self.failure("%s did not come up correctly" % self.r_dlm) - failed = lpc - - if not self.wait_on_state(node, self.r_o2cb, clone_max): - self.failure("%s did not come up correctly" % self.r_o2cb) - failed = lpc - - for fs in self.r_ocfs2: - if not self.wait_on_state(node, fs, clone_max): - self.failure("%s did not come up correctly" % fs) - failed = lpc - - if failed: - return self.failure("iteration %d failed" % failed) - return self.success() - -AllTestClasses.append(HAEStandbyTest) - - -class NearQuorumPointTest(CTSTest): - ''' - This test brings larger clusters near the quorum point (50%). - In addition, it will test doing starts and stops at the same time. - - Here is how I think it should work: - - loop over the nodes and decide randomly which will be up and which - will be down Use a 50% probability for each of up/down. - - figure out what to do to get into that state from the current state - - in parallel, bring up those going up and bring those going down. - ''' - - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "NearQuorumPoint" - - def __call__(self, dummy): - '''Perform the 'NearQuorumPoint' test. 
''' - self.incr("calls") - startset = [] - stopset = [] - - stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint") - #decide what to do with each node - for node in self.Env["nodes"]: - action = self.Env.random_gen.choice(["start","stop"]) - #action = self.Env.random_gen.choice(["start","stop","no change"]) - if action == "start" : - startset.append(node) - elif action == "stop" : - stopset.append(node) - - self.debug("start nodes:" + repr(startset)) - self.debug("stop nodes:" + repr(stopset)) - - #add search patterns - watchpats = [ ] - for node in stopset: - if self.CM.ShouldBeStatus[node] == "up": - watchpats.append(self.templates["Pat:We_stopped"] % node) - - for node in startset: - if self.CM.ShouldBeStatus[node] == "down": - #watchpats.append(self.templates["Pat:NonDC_started"] % node) - watchpats.append(self.templates["Pat:Local_started"] % node) - else: - for stopping in stopset: - if self.CM.ShouldBeStatus[stopping] == "up": - watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) - - if len(watchpats) == 0: - return self.skipped() - - if len(startset) != 0: - watchpats.append(self.templates["Pat:DC_IDLE"]) - - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - - watch.set_watch() - - #begin actions - for node in stopset: - if self.CM.ShouldBeStatus[node] == "up": - self.CM.StopaCMnoBlock(node) - - for node in startset: - if self.CM.ShouldBeStatus[node] == "down": - self.CM.StartaCMnoBlock(node) - - #get the result - if watch.look_for_all(): - self.CM.cluster_stable() - self.CM.fencing_cleanup("NearQuorumPoint", stonith) - return self.success() - - self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) - - #get the "bad" nodes - upnodes = [] - for node in stopset: - if self.CM.StataCM(node) == 1: - upnodes.append(node) - - downnodes = [] - for node in startset: - if self.CM.StataCM(node) == 0: - downnodes.append(node) - - self.CM.fencing_cleanup("NearQuorumPoint", stonith) - if upnodes == [] and downnodes == []: - self.CM.cluster_stable() - - # Make sure they're completely down with no residule - for node in stopset: - self.rsh(node, self.templates["StopCmd"]) - - return self.success() - - if len(upnodes) > 0: - self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) - - if len(downnodes) > 0: - self.logger.log("Warn: Unstartable nodes: " + repr(downnodes)) - - return self.failure() - - def is_applicable(self): - return True - -AllTestClasses.append(NearQuorumPointTest) - - -class RollingUpgradeTest(CTSTest): - '''Perform a rolling upgrade of the cluster''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "RollingUpgrade" - self.start = StartTest(cm) - self.stop = StopTest(cm) - self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - - def setup(self, node): - # Start all remaining nodes - ret = self.stopall(None) - if not ret: - return self.failure("Couldn't stop all nodes") - - for node in self.Env["nodes"]: - if not self.downgrade(node, None): - return self.failure("Couldn't downgrade %s" % node) - - ret = self.startall(None) - if not ret: - return self.failure("Couldn't start all nodes") - return self.success() - - def teardown(self, node): - # Stop everything - ret = self.stopall(None) - if not ret: - return self.failure("Couldn't stop all nodes") - - for node in self.Env["nodes"]: - if not self.upgrade(node, None): - return self.failure("Couldn't upgrade %s" % node) - - return self.success() - - def install(self, node, version, start=1, flags="--force"): - - 
target_dir = "/tmp/rpm-%s" % version - src_dir = "%s/%s" % (self.Env["rpm-dir"], version) - - self.logger.log("Installing %s on %s with %s" % (version, node, flags)) - if not self.stop(node): - return self.failure("stop failure: "+node) - - self.rsh(node, "mkdir -p %s" % target_dir) - self.rsh(node, "rm -f %s/*.rpm" % target_dir) - (_, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, verbose=1) - for line in lines: - line = line[:-1] - rc = self.rsh.copy("%s" % (line), "%s:%s/" % (node, target_dir)) - self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) - - if start and not self.start(node): - return self.failure("start failure: "+node) - - return self.success() - - def upgrade(self, node, start=1): - return self.install(node, self.Env["current-version"], start) - - def downgrade(self, node, start=1): - return self.install(node, self.Env["previous-version"], start, "--force --nodeps") - - def __call__(self, node): - '''Perform the 'Rolling Upgrade' test. ''' - self.incr("calls") - - for node in self.Env["nodes"]: - if self.upgrade(node): - return self.failure("Couldn't upgrade %s" % node) - - self.CM.cluster_stable() - - return self.success() - - def is_applicable(self): - if not self.is_applicable_common(): - return None - - if not "rpm-dir" in list(self.Env.keys()): - return None - if not "current-version" in list(self.Env.keys()): - return None - if not "previous-version" in list(self.Env.keys()): - return None - - return 1 - -# Register RestartTest as a good test to run -AllTestClasses.append(RollingUpgradeTest) - - -class BSC_AddResource(CTSTest): - '''Add a resource to the cluster''' - def __init__(self, cm): - CTSTest.__init__(self, cm) - self.name = "AddResource" - self.resource_offset = 0 - self.cib_cmd = """cibadmin -C -o %s -X '%s' """ - - def __call__(self, node): - self.incr("calls") - self.resource_offset = self.resource_offset + 1 - - r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) - start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok" - - patterns = [] - patterns.append(start_pat % r_id) - - watch = self.create_watch(patterns, self.Env["DeadTime"]) - watch.set_watch() - - ip = self.NextIP() - if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): - return self.failure("Make resource %s failed" % r_id) - - failed = 0 - watch_result = watch.look_for_all() - if watch.unmatched: - for regex in watch.unmatched: - self.logger.log ("Warn: Pattern not found: %s" % (regex)) - failed = 1 - - if failed: - return self.failure("Resource pattern(s) not found") - - if not self.CM.cluster_stable(self.Env["DeadTime"]): - return self.failure("Unstable cluster") - - return self.success() - - def NextIP(self): - ip = self.Env["IPBase"] - if ":" in ip: - fields = ip.rpartition(":") - fields[2] = str(hex(int(fields[2], 16)+1)) - print(str(hex(int(f[2], 16)+1))) - else: - fields = ip.rpartition('.') - fields[2] = str(int(fields[2])+1) - - ip = fields[0] + fields[1] + fields[3]; - self.Env["IPBase"] = ip - return ip.strip() - - def make_ip_resource(self, node, id, rclass, type, ip): - self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node)) - rsc_xml=""" - - - - -""" % (id, rclass, type, id, id, ip) - - node_constraint = """ - - - - - """ % (id, id, id, id, node) - - rc = 0 - (rc, _) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), verbose=1) - if rc != 0: - self.logger.log("Constraint creation failed: %d" % rc) - return None - - (rc, _) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), verbose=1) - if rc != 0: - 
self.logger.log("Resource creation failed: %d" % rc) - return None - - return 1 - - def is_applicable(self): - if self.Env["DoBSC"]: - return True - return None - -AllTestClasses.append(BSC_AddResource) - - -class SimulStopLite(CTSTest): - '''Stop any active nodes ~ simultaneously''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStopLite" - - def __call__(self, dummy): - '''Perform the 'SimulStopLite' setup work. ''' - self.incr("calls") - - self.debug("Setup: " + self.name) - - # We ignore the "node" parameter... - watchpats = [ ] - - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "up": - self.incr("WasStarted") - watchpats.append(self.templates["Pat:We_stopped"] % node) - - if len(watchpats) == 0: - return self.success() - - # Stop all the nodes - at about the same time... - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - - watch.set_watch() - self.set_timer() - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "up": - self.CM.StopaCMnoBlock(node) - if watch.look_for_all(): - # Make sure they're completely down with no residule - for node in self.Env["nodes"]: - self.rsh(node, self.templates["StopCmd"]) - - return self.success() - - did_fail = 0 - up_nodes = [] - for node in self.Env["nodes"]: - if self.CM.StataCM(node) == 1: - did_fail = 1 - up_nodes.append(node) - - if did_fail: - return self.failure("Active nodes exist: " + repr(up_nodes)) - - self.logger.log("Warn: All nodes stopped but CTS didn't detect: " - + repr(watch.unmatched)) - - return self.failure("Missing log message: "+repr(watch.unmatched)) - - def is_applicable(self): - '''SimulStopLite is a setup test and never applicable''' - return False - - -class SimulStartLite(CTSTest): - '''Start any stopped nodes ~ simultaneously''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStartLite" - - def __call__(self, dummy): - '''Perform the 'SimulStartList' setup work. ''' - self.incr("calls") - self.debug("Setup: " + self.name) - - # We ignore the "node" parameter... - node_list = [] - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "down": - self.incr("WasStopped") - node_list.append(node) - - self.set_timer() - while len(node_list) > 0: - # Repeat until all nodes come up - watchpats = [ ] - - uppat = self.templates["Pat:NonDC_started"] - if self.CM.upcount() == 0: - uppat = self.templates["Pat:Local_started"] - - watchpats.append(self.templates["Pat:DC_IDLE"]) - for node in node_list: - watchpats.append(uppat % node) - watchpats.append(self.templates["Pat:InfraUp"] % node) - watchpats.append(self.templates["Pat:PacemakerUp"] % node) - - # Start all the nodes - at about the same time... 
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - watch.set_watch() - - stonith = self.CM.prepare_fencing_watcher(self.name) - - for node in node_list: - self.CM.StartaCMnoBlock(node) - - watch.look_for_all() - - node_list = self.CM.fencing_cleanup(self.name, stonith) - - if node_list == None: - return self.failure("Cluster did not stabilize") - - # Remove node_list messages from watch.unmatched - for node in node_list: - self.logger.debug("Dealing with stonith operations for %s" % repr(node_list)) - if watch.unmatched: - try: - watch.unmatched.remove(uppat % node) - except: - self.debug("Already matched: %s" % (uppat % node)) - try: - watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) - except: - self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) - try: - watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) - except: - self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) - - if watch.unmatched: - for regex in watch.unmatched: - self.logger.log ("Warn: Startup pattern not found: %s" %(regex)) - - if not self.CM.cluster_stable(): - return self.failure("Cluster did not stabilize") - - did_fail = 0 - unstable = [] - for node in self.Env["nodes"]: - if self.CM.StataCM(node) == 0: - did_fail = 1 - unstable.append(node) - - if did_fail: - return self.failure("Unstarted nodes exist: " + repr(unstable)) - - unstable = [] - for node in self.Env["nodes"]: - if not self.CM.node_stable(node): - did_fail = 1 - unstable.append(node) - - if did_fail: - return self.failure("Unstable cluster nodes exist: " + repr(unstable)) - - return self.success() - - def is_applicable(self): - '''SimulStartLite is a setup test and never applicable''' - return False - - -def TestList(cm, audits): - result = [] - for testclass in AllTestClasses: - bound_test = testclass(cm) - if bound_test.is_applicable(): - bound_test.Audits = audits - result.append(bound_test) - return result - - -class RemoteLXC(CTSTest): - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "RemoteLXC" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.num_containers = 2 - self.is_container = 1 - self.failed = 0 - self.fail_string = "" - - def start_lxc_simple(self, node): - - # restore any artifacts laying around from a previous test. 
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") - - # generate the containers, put them in the config, add some resources to them - pats = [ ] - watch = self.create_watch(pats, 120) - watch.set_watch() - pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) - pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) - pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) - pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) - - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) - self.set_timer("remoteSimpleInit") - watch.look_for_all() - self.log_timer("remoteSimpleInit") - if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 - - def cleanup_lxc_simple(self, node): - - pats = [ ] - # if the test failed, attempt to clean up the cib and libvirt environment - # as best as possible - if self.failed == 1: - # restore libvirt and cib - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") - return - - watch = self.create_watch(pats, 120) - watch.set_watch() - - pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) - pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) - - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") - self.set_timer("remoteSimpleCleanup") - watch.look_for_all() - self.log_timer("remoteSimpleCleanup") - - if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 - - # cleanup libvirt - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") - - def __call__(self, node): - '''Perform the 'RemoteLXC' test. ''' - self.incr("calls") - - ret = self.startall(None) - if not ret: - return self.failure("Setup failed, start all nodes failed.") - - (rc, _) = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") - if rc == 1: - self.log("Environment test for lxc support failed.") - return self.skipped() - - self.start_lxc_simple(node) - self.cleanup_lxc_simple(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - if self.failed == 1: - return self.failure(self.fail_string) - - return self.success() - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for ping", - r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)", - # The orphaned lxc-ms resource causes an expected transition error - # that is a result of the scheduler not having knowledge that the - # promotable resource used to be a clone. As a result, it looks like that - # resource is running in multiple locations when it shouldn't... But in - # this instance we know why this error is occurring and that it is expected. 
- r"Calculated [Tt]ransition .*pe-error", - r"Resource lxc-ms .* is active on 2 nodes attempting recovery", - r"Unknown operation: fail", - r"VirtualDomain.*ERROR: Unable to determine emulator", - ] - -AllTestClasses.append(RemoteLXC) - - -class RemoteDriver(CTSTest): - - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = self.__class__.__name__ - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.stop = StopTest(cm) - self.remote_rsc = "remote-rsc" - self.cib_cmd = """cibadmin -C -o %s -X '%s' """ - self.reset() - - def reset(self): - self.pcmk_started = 0 - self.failed = False - self.fail_string = "" - self.remote_node_added = 0 - self.remote_rsc_added = 0 - self.remote_use_reconnect_interval = self.Env.random_gen.choice([True,False]) - - def fail(self, msg): - """ Mark test as failed. """ - - self.failed = True - - # Always log the failure. - self.logger.log(msg) - - # Use first failure as test status, as it's likely to be most useful. - if not self.fail_string: - self.fail_string = msg - - def get_othernode(self, node): - for othernode in self.Env["nodes"]: - if othernode == node: - # we don't want to try and use the cib that we just shutdown. - # find a cluster node that is not our soon to be remote-node. - continue - else: - return othernode - - def del_rsc(self, node, rsc): - othernode = self.get_othernode(node) - (rc, _) = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) - if rc != 0: - self.fail("Removal of resource '%s' failed" % rsc) - - def add_rsc(self, node, rsc_xml): - othernode = self.get_othernode(node) - (rc, _) = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) - if rc != 0: - self.fail("resource creation failed") - - def add_primitive_rsc(self, node): - rsc_xml = """ - - - - - -""" % { "node": self.remote_rsc } - self.add_rsc(node, rsc_xml) - if not self.failed: - self.remote_rsc_added = 1 - - def add_connection_rsc(self, node): - rsc_xml = """ - - - -""" % { "node": self.remote_node, "server": node } - - if self.remote_use_reconnect_interval: - # Set reconnect interval on resource - rsc_xml = rsc_xml + """ - -""" % (self.remote_node) - - rsc_xml = rsc_xml + """ - - - - - - -""" % { "node": self.remote_node } - - self.add_rsc(node, rsc_xml) - if not self.failed: - self.remote_node_added = 1 - - def disable_services(self, node): - self.corosync_enabled = self.Env.service_is_enabled(node, "corosync") - if self.corosync_enabled: - self.Env.disable_service(node, "corosync") - - self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker") - if self.pacemaker_enabled: - self.Env.disable_service(node, "pacemaker") - - def restore_services(self, node): - if self.corosync_enabled: - self.Env.enable_service(node, "corosync") - - if self.pacemaker_enabled: - self.Env.enable_service(node, "pacemaker") - - def stop_pcmk_remote(self, node): - # disable pcmk remote - for i in range(10): - (rc, _) = self.rsh(node, "service pacemaker_remote stop") - if rc != 0: - time.sleep(6) - else: - break - - def start_pcmk_remote(self, node): - for i in range(10): - (rc, _) = self.rsh(node, "service pacemaker_remote start") - if rc != 0: - time.sleep(6) - else: - self.pcmk_started = 1 - break - - def freeze_pcmk_remote(self, node): - """ Simulate a Pacemaker Remote daemon failure. """ - - # We freeze the process. - self.rsh(node, "killall -STOP pacemaker-remoted") - - def resume_pcmk_remote(self, node): - # We resume the process. 
- self.rsh(node, "killall -CONT pacemaker-remoted") - - def start_metal(self, node): - # Cluster nodes are reused as remote nodes in remote tests. If cluster - # services were enabled at boot, in case the remote node got fenced, the - # cluster node would join instead of the expected remote one. Meanwhile - # pacemaker_remote would not be able to start. Depending on the chances, - # the situations might not be able to be orchestrated gracefully any more. - # - # Temporarily disable any enabled cluster serivces. - self.disable_services(node) - - pcmk_started = 0 - - # make sure the resource doesn't already exist for some reason - self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) - self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) - - if not self.stop(node): - self.fail("Failed to shutdown cluster node %s" % node) - return - - self.start_pcmk_remote(node) - - if self.pcmk_started == 0: - self.fail("Failed to start pacemaker_remote on node %s" % node) - return - - # Convert node to baremetal now that it has shutdown the cluster stack - pats = [ ] - watch = self.create_watch(pats, 120) - watch.set_watch() - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - - self.add_connection_rsc(node) - - self.set_timer("remoteMetalInit") - watch.look_for_all() - self.log_timer("remoteMetalInit") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - def migrate_connection(self, node): - if self.failed: - return - - pats = [ ] - pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node)) - pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - watch = self.create_watch(pats, 120) - watch.set_watch() - - (rc, _) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("failed to move remote node connection resource") - return - - self.set_timer("remoteMetalMigrate") - watch.look_for_all() - self.log_timer("remoteMetalMigrate") - - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - def fail_rsc(self, node): - if self.failed: - return - - watchpats = [ ] - watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node)) - watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - watchpats.append(self.templates["Pat:DC_IDLE"]) - - watch = self.create_watch(watchpats, 120) - watch.set_watch() - - self.debug("causing dummy rsc to fail.") - - self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") - - self.set_timer("remoteRscFail") - watch.look_for_all() - self.log_timer("remoteRscFail") - if watch.unmatched: - self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) - - def fail_connection(self, node): - if self.failed: - return - - watchpats = [ ] - watchpats.append(self.templates["Pat:Fencing_ok"] % self.remote_node) - watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) - - watch = self.create_watch(watchpats, 120) - watch.set_watch() - - # freeze the pcmk remote daemon. 
this will result in fencing - self.debug("Force stopped active remote node") - self.freeze_pcmk_remote(node) - - self.debug("Waiting for remote node to be fenced.") - self.set_timer("remoteMetalFence") - watch.look_for_all() - self.log_timer("remoteMetalFence") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - self.debug("Waiting for the remote node to come back up") - self.CM.ns.wait_for_node(node, 120); - - pats = [ ] - watch = self.create_watch(pats, 240) - watch.set_watch() - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) - if self.remote_rsc_added == 1: - pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - - # start the remote node again watch it integrate back into cluster. - self.start_pcmk_remote(node) - if self.pcmk_started == 0: - self.fail("Failed to start pacemaker_remote on node %s" % node) - return - - self.debug("Waiting for remote node to rejoin cluster after being fenced.") - self.set_timer("remoteMetalRestart") - watch.look_for_all() - self.log_timer("remoteMetalRestart") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - def add_dummy_rsc(self, node): - if self.failed: - return - - # verify we can put a resource on the remote node - pats = [ ] - watch = self.create_watch(pats, 120) - watch.set_watch() - pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - - # Add a resource that must live on remote-node - self.add_primitive_rsc(node) - - # force that rsc to prefer the remote node. - (rc, _) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to place remote resource on remote node.") - return - - self.set_timer("remoteMetalRsc") - watch.look_for_all() - self.log_timer("remoteMetalRsc") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - def test_attributes(self, node): - if self.failed: - return - - # This verifies permanent attributes can be set on a remote-node. It also - # verifies the remote-node can edit its own cib node section remotely. - (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to set remote-node attribute. 
rc:%s output:%s" % (rc, line)) - return - - (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to get remote-node attribute") - return - - (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to delete remote-node attribute") - return - - def cleanup_metal(self, node): - self.restore_services(node) - - if self.pcmk_started == 0: - return - - pats = [ ] - - watch = self.create_watch(pats, 120) - watch.set_watch() - - if self.remote_rsc_added == 1: - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc)) - if self.remote_node_added == 1: - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node)) - - self.set_timer("remoteMetalCleanup") - - self.resume_pcmk_remote(node) - - if self.remote_rsc_added == 1: - - # Remove dummy resource added for remote node tests - self.debug("Cleaning up dummy rsc put on remote node") - self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc) - self.del_rsc(node, self.remote_rsc) - - if self.remote_node_added == 1: - - # Remove remote node's connection resource - self.debug("Cleaning up remote node connection resource") - self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node)) - self.del_rsc(node, self.remote_node) - - watch.look_for_all() - self.log_timer("remoteMetalCleanup") - - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - self.stop_pcmk_remote(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - if self.remote_node_added == 1: - # Remove remote node itself - self.debug("Cleaning up node entry for remote node") - self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) - - def setup_env(self, node): - - self.remote_node = "remote-%s" % (node) - - # we are assuming if all nodes have a key, that it is - # the right key... If any node doesn't have a remote - # key, we regenerate it everywhere. 
- if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]): - return - - # create key locally - (handle, keyfile) = tempfile.mkstemp(".cts") - os.close(handle) - subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], - stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - - # sync key throughout the cluster - for node in self.Env["nodes"]: - self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker") - self.rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % node) - self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") - self.rsh(node, "chmod 0640 /etc/pacemaker/authkey") - os.unlink(keyfile) - - def is_applicable(self): - if not self.is_applicable_common(): - return False - - for node in self.Env["nodes"]: - (rc, _) = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1") - if rc != 0: - return False - return True - - def start_new_test(self, node): - self.incr("calls") - self.reset() - - ret = self.startall(None) - if not ret: - return self.failure("setup failed: could not start all nodes") - - self.setup_env(node) - self.start_metal(node) - self.add_dummy_rsc(node) - return True - - def __call__(self, node): - return self.failure("This base class is not meant to be called directly.") - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ r"""is running on remote.*which isn't allowed""", - r"""Connection terminated""", - r"""Could not send remote""", - ] - -# RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses - - -class RemoteBasic(RemoteDriver): - - def __call__(self, node): - '''Perform the 'RemoteBaremetal' test. ''' - - if not self.start_new_test(node): - return self.failure(self.fail_string) - - self.test_attributes(node) - self.cleanup_metal(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - if self.failed: - return self.failure(self.fail_string) - - return self.success() - -AllTestClasses.append(RemoteBasic) - -class RemoteStonithd(RemoteDriver): - - def __call__(self, node): - '''Perform the 'RemoteStonithd' test. ''' - - if not self.start_new_test(node): - return self.failure(self.fail_string) - - self.fail_connection(node) - self.cleanup_metal(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - if self.failed: - return self.failure(self.fail_string) - - return self.success() - - def is_applicable(self): - if not RemoteDriver.is_applicable(self): - return False - - if "DoFencing" in list(self.Env.keys()): - return self.Env["DoFencing"] - - return True - - def errorstoignore(self): - ignore_pats = [ - r"Lost connection to Pacemaker Remote node", - r"Software caused connection abort", - r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor", - r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*", - r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)", - r"error: Result of monitor operation for .* on remote-.*: Internal communication failure", - ] - - ignore_pats.extend(RemoteDriver.errorstoignore(self)) - return ignore_pats - -AllTestClasses.append(RemoteStonithd) - - -class RemoteMigrate(RemoteDriver): - - def __call__(self, node): - '''Perform the 'RemoteMigrate' test. 
''' - - if not self.start_new_test(node): - return self.failure(self.fail_string) - - self.migrate_connection(node) - self.cleanup_metal(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - if self.failed: - return self.failure(self.fail_string) - - return self.success() - - def is_applicable(self): - if not RemoteDriver.is_applicable(self): - return 0 - # This test requires at least three nodes: one to convert to a - # remote node, one to host the connection originally, and one - # to migrate the connection to. - if len(self.Env["nodes"]) < 3: - return 0 - return 1 - -AllTestClasses.append(RemoteMigrate) - - -class RemoteRscFailure(RemoteDriver): - - def __call__(self, node): - '''Perform the 'RemoteRscFailure' test. ''' - - if not self.start_new_test(node): - return self.failure(self.fail_string) - - # This is an important step. We are migrating the connection - # before failing the resource. This verifies that the migration - # has properly maintained control over the remote-node. - self.migrate_connection(node) - - self.fail_rsc(node) - self.cleanup_metal(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - if self.failed: - return self.failure(self.fail_string) - - return self.success() - - def errorstoignore(self): - ignore_pats = [ - r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)", - r"Dummy.*: No process state file found", - ] - - ignore_pats.extend(RemoteDriver.errorstoignore(self)) - return ignore_pats - - def is_applicable(self): - if not RemoteDriver.is_applicable(self): - return 0 - # This test requires at least three nodes: one to convert to a - # remote node, one to host the connection originally, and one - # to migrate the connection to. - if len(self.Env["nodes"]) < 3: - return 0 - return 1 - -AllTestClasses.append(RemoteRscFailure) - -# vim:ts=4:sw=4:et: diff --git a/cts/lab/ClusterManager.py b/cts/lab/ClusterManager.py deleted file mode 100644 index fda4cfb..0000000 --- a/cts/lab/ClusterManager.py +++ /dev/null @@ -1,940 +0,0 @@ -""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = """Copyright 2000-2023 the Pacemaker project contributors. -Certain portions by Huang Zhen are copyright 2004 -International Business Machines. The version control history for this file -may have further details.""" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import os -import re -import time - -from collections import UserDict - -from cts.CIB import ConfigFactory -from cts.CTStests import AuditResource - -from pacemaker.buildoptions import BuildOptions -from pacemaker._cts.CTS import NodeStatus, Process -from pacemaker._cts.environment import EnvFactory -from pacemaker._cts.logging import LogFactory -from pacemaker._cts.patterns import PatternSelector -from pacemaker._cts.remote import RemoteFactory -from pacemaker._cts.watcher import LogWatcher - -class ClusterManager(UserDict): - '''The Cluster Manager class. - This is an subclass of the Python dictionary class. - (this is because it contains lots of {name,value} pairs, - not because it's behavior is that terribly similar to a - dictionary in other ways.) - - This is an abstract class which class implements high-level - operations on the cluster and/or its cluster managers. - Actual cluster managers classes are subclassed from this type. - - One of the things we do is track the state we think every node should - be in. 
- ''' - - def __InitialConditions(self): - #if os.geteuid() != 0: - # raise ValueError("Must Be Root!") - None - - def _finalConditions(self): - for key in list(self.keys()): - if self[key] == None: - raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.") - - def __init__(self): - self.Env = EnvFactory().getInstance() - self.templates = PatternSelector(self.Env["Name"]) - self.__InitialConditions() - self.logger = LogFactory() - self.TestLoggingLevel=0 - self.data = {} - self.name = self.Env["Name"] - - self.rsh = RemoteFactory().getInstance() - self.ShouldBeStatus={} - self.ns = NodeStatus(self.Env) - self.OurNode = os.uname()[1].lower() - self.__instance_errorstoignore = [] - - self.cib_installed = 0 - self.config = None - self.cluster_monitor = 0 - self.use_short_names = 1 - - if self.Env["DoBSC"]: - del self.templates["Pat:They_stopped"] - - self._finalConditions() - - self.check_transitions = 0 - self.check_elections = 0 - self.CIBsync = {} - self.CibFactory = ConfigFactory(self) - self.cib = self.CibFactory.createConfig(self.Env["Schema"]) - - def __getitem__(self, key): - if key == "Name": - return self.name - - print("FIXME: Getting %s from %s" % (key, repr(self))) - if key in self.data: - return self.data[key] - - return self.templates.get_patterns(key) - - def __setitem__(self, key, value): - print("FIXME: Setting %s=%s on %s" % (key, value, repr(self))) - self.data[key] = value - - def key_for_node(self, node): - return node - - def instance_errorstoignore_clear(self): - '''Allows the test scenario to reset instance errors to ignore on each iteration.''' - self.__instance_errorstoignore = [] - - def instance_errorstoignore(self): - '''Return list of errors which are 'normal' for a specific test instance''' - return self.__instance_errorstoignore - - def log(self, args): - self.logger.log(args) - - def debug(self, args): - self.logger.debug(args) - - def upcount(self): - '''How many nodes are up?''' - count = 0 - for node in self.Env["nodes"]: - if self.ShouldBeStatus[node] == "up": - count = count + 1 - return count - - def install_support(self, command="install"): - for node in self.Env["nodes"]: - self.rsh(node, BuildOptions.DAEMON_DIR + "/cts-support " + command) - - def prepare_fencing_watcher(self, name): - # If we don't have quorum now but get it as a result of starting this node, - # then a bunch of nodes might get fenced - upnode = None - if self.HasQuorum(None): - self.debug("Have quorum") - return None - - if not self.templates["Pat:Fencing_start"]: - print("No start pattern") - return None - - if not self.templates["Pat:Fencing_ok"]: - print("No ok pattern") - return None - - stonith = None - stonithPats = [] - for peer in self.Env["nodes"]: - if self.ShouldBeStatus[peer] != "up": - stonithPats.append(self.templates["Pat:Fencing_ok"] % peer) - stonithPats.append(self.templates["Pat:Fencing_start"] % peer) - - stonith = LogWatcher(self.Env["LogFileName"], stonithPats, self.Env["nodes"], self.Env["LogWatcher"], "StartupFencing", 0) - stonith.set_watch() - return stonith - - def fencing_cleanup(self, node, stonith): - peer_list = [] - peer_state = {} - - self.debug("Looking for nodes that were fenced as a result of %s starting" % node) - - # If we just started a node, we may now have quorum (and permission to fence) - if not stonith: - self.debug("Nothing to do") - return peer_list - - q = self.HasQuorum(None) - if not q and len(self.Env["nodes"]) > 2: - # We didn't gain quorum - we shouldn't have shot anyone - self.debug("Quorum: %d 
Len: %d" % (q, len(self.Env["nodes"]))) - return peer_list - - for n in self.Env["nodes"]: - peer_state[n] = "unknown" - - # Now see if any states need to be updated - self.debug("looking for: " + repr(stonith.regexes)) - shot = stonith.look(0) - while shot: - line = repr(shot) - self.debug("Found: " + line) - del stonith.regexes[stonith.whichmatch] - - # Extract node name - for n in self.Env["nodes"]: - if re.search(self.templates["Pat:Fencing_ok"] % n, shot): - peer = n - peer_state[peer] = "complete" - self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer) - - elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot): - # TODO: Correctly detect multiple fencing operations for the same host - peer = n - peer_state[peer] = "in-progress" - self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer) - - if not peer: - self.logger.log("ERROR: Unknown stonith match: %s" % line) - - elif not peer in peer_list: - self.debug("Found peer: " + peer) - peer_list.append(peer) - - # Get the next one - shot = stonith.look(60) - - for peer in peer_list: - - self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer])) - if self.Env["at-boot"]: - self.ShouldBeStatus[peer] = "up" - else: - self.ShouldBeStatus[peer] = "down" - - if peer_state[peer] == "in-progress": - # Wait for any in-progress operations to complete - shot = stonith.look(60) - while len(stonith.regexes) and shot: - line = repr(shot) - self.debug("Found: " + line) - del stonith.regexes[stonith.whichmatch] - shot = stonith.look(60) - - # Now make sure the node is alive too - self.ns.wait_for_node(peer, self.Env["DeadTime"]) - - # Poll until it comes up - if self.Env["at-boot"]: - if not self.StataCM(peer): - time.sleep(self.Env["StartTime"]) - - if not self.StataCM(peer): - self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer) - return None - - return peer_list - - def StartaCM(self, node, verbose=False): - - '''Start up the cluster manager on a given node''' - if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node)) - else: self.debug("Starting %s on node %s" % (self.templates["Name"], node)) - ret = 1 - - if not node in self.ShouldBeStatus: - self.ShouldBeStatus[node] = "down" - - if self.ShouldBeStatus[node] != "down": - return 1 - - patterns = [] - # Technically we should always be able to notice ourselves starting - patterns.append(self.templates["Pat:Local_started"] % node) - if self.upcount() == 0: - patterns.append(self.templates["Pat:DC_started"] % node) - else: - patterns.append(self.templates["Pat:NonDC_started"] % node) - - watch = LogWatcher( - self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], "StartaCM", self.Env["StartTime"]+10) - - self.install_config(node) - - self.ShouldBeStatus[node] = "any" - if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]): - self.logger.log ("%s was already started" % (node)) - return 1 - - stonith = self.prepare_fencing_watcher(node) - watch.set_watch() - - (rc, _) = self.rsh(node, self.templates["StartCmd"]) - if rc != 0: - self.logger.log ("Warn: Start command failed on node %s" % (node)) - self.fencing_cleanup(node, stonith) - return None - - self.ShouldBeStatus[node] = "up" - watch_result = watch.look_for_all() - - if watch.unmatched: - for regex in watch.unmatched: - self.logger.log ("Warn: Startup pattern not found: %s" % (regex)) - - if watch_result and 
self.cluster_stable(self.Env["DeadTime"]): - #self.debug("Found match: "+ repr(watch_result)) - self.fencing_cleanup(node, stonith) - return 1 - - elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]): - self.fencing_cleanup(node, stonith) - return 1 - - self.logger.log ("Warn: Start failed for node %s" % (node)) - return None - - def StartaCMnoBlock(self, node, verbose=False): - - '''Start up the cluster manager on a given node with none-block mode''' - - if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node)) - else: self.debug("Starting %s on node %s" % (self["Name"], node)) - - self.install_config(node) - self.rsh(node, self.templates["StartCmd"], synchronous=False) - self.ShouldBeStatus[node] = "up" - return 1 - - def StopaCM(self, node, verbose=False, force=False): - - '''Stop the cluster manager on a given node''' - - if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node)) - else: self.debug("Stopping %s on node %s" % (self["Name"], node)) - - if self.ShouldBeStatus[node] != "up" and force == False: - return 1 - - (rc, _) = self.rsh(node, self.templates["StopCmd"]) - if rc == 0: - # Make sure we can continue even if corosync leaks - # fdata-* is the old name - #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*") - self.ShouldBeStatus[node] = "down" - self.cluster_stable(self.Env["DeadTime"]) - return 1 - else: - self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node)) - - return None - - def StopaCMnoBlock(self, node): - - '''Stop the cluster manager on a given node with none-block mode''' - - self.debug("Stopping %s on node %s" % (self["Name"], node)) - - self.rsh(node, self.templates["StopCmd"], synchronous=False) - self.ShouldBeStatus[node] = "down" - return 1 - - def RereadCM(self, node): - - '''Force the cluster manager on a given node to reread its config - This may be a no-op on certain cluster managers. - ''' - (rc, _) = self.rsh(node, self.templates["RereadCmd"]) - if rc == 0: - return 1 - else: - self.logger.log ("Could not force %s on node %s to reread its config" - % (self["Name"], node)) - return None - - def startall(self, nodelist=None, verbose=False, quick=False): - - '''Start the cluster manager on every node in the cluster. - We can do it on a subset of the cluster if nodelist is not None. - ''' - map = {} - if not nodelist: - nodelist = self.Env["nodes"] - - for node in nodelist: - if self.ShouldBeStatus[node] == "down": - self.ns.wait_for_all_nodes(nodelist, 300) - - if not quick: - # This is used for "basic sanity checks", so only start one node ... - if not self.StartaCM(node, verbose=verbose): - return 0 - return 1 - - # Approximation of SimulStartList for --boot - watchpats = [ ] - watchpats.append(self.templates["Pat:DC_IDLE"]) - for node in nodelist: - watchpats.append(self.templates["Pat:InfraUp"] % node) - watchpats.append(self.templates["Pat:PacemakerUp"] % node) - watchpats.append(self.templates["Pat:Local_started"] % node) - watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node)) - - # Start all the nodes - at about the same time... 
- watch = LogWatcher(self.Env["LogFileName"], watchpats, self.Env["nodes"], self.Env["LogWatcher"], "fast-start", self.Env["DeadTime"]+10) - watch.set_watch() - - if not self.StartaCM(nodelist[0], verbose=verbose): - return 0 - for node in nodelist: - self.StartaCMnoBlock(node, verbose=verbose) - - watch.look_for_all() - if watch.unmatched: - for regex in watch.unmatched: - self.logger.log ("Warn: Startup pattern not found: %s" % (regex)) - - if not self.cluster_stable(): - self.logger.log("Cluster did not stabilize") - return 0 - - return 1 - - def stopall(self, nodelist=None, verbose=False, force=False): - - '''Stop the cluster managers on every node in the cluster. - We can do it on a subset of the cluster if nodelist is not None. - ''' - - ret = 1 - map = {} - if not nodelist: - nodelist = self.Env["nodes"] - for node in self.Env["nodes"]: - if self.ShouldBeStatus[node] == "up" or force == True: - if not self.StopaCM(node, verbose=verbose, force=force): - ret = 0 - return ret - - def rereadall(self, nodelist=None): - - '''Force the cluster managers on every node in the cluster - to reread their config files. We can do it on a subset of the - cluster if nodelist is not None. - ''' - - map = {} - if not nodelist: - nodelist = self.Env["nodes"] - for node in self.Env["nodes"]: - if self.ShouldBeStatus[node] == "up": - self.RereadCM(node) - - def statall(self, nodelist=None): - - '''Return the status of the cluster managers in the cluster. - We can do it on a subset of the cluster if nodelist is not None. - ''' - - result = {} - if not nodelist: - nodelist = self.Env["nodes"] - for node in nodelist: - if self.StataCM(node): - result[node] = "up" - else: - result[node] = "down" - return result - - def isolate_node(self, target, nodes=None): - '''isolate the communication between the nodes''' - if not nodes: - nodes = self.Env["nodes"] - - for node in nodes: - if node != target: - rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node)) - if rc != 0: - self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc)) - return None - else: - self.debug("Communication cut between %s and %s" % (target, node)) - return 1 - - def unisolate_node(self, target, nodes=None): - '''fix the communication between the nodes''' - if not nodes: - nodes = self.Env["nodes"] - - for node in nodes: - if node != target: - restored = 0 - - # Limit the amount of time we have asynchronous connectivity for - # Restore both sides as simultaneously as possible - self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=False) - self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=False) - self.debug("Communication restored between %s and %s" % (target, node)) - - def oprofileStart(self, node=None): - if not node: - for n in self.Env["oprofile"]: - self.oprofileStart(n) - - elif node in self.Env["oprofile"]: - self.debug("Enabling oprofile on %s" % node) - self.rsh(node, "opcontrol --init") - self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all") - self.rsh(node, "opcontrol --start") - self.rsh(node, "opcontrol --reset") - - def oprofileSave(self, test, node=None): - if not node: - for n in self.Env["oprofile"]: - self.oprofileSave(test, n) - - elif node in self.Env["oprofile"]: - self.rsh(node, "opcontrol --dump") - self.rsh(node, "opcontrol --save=cts.%d" % test) - # Read back with: opreport -l session:cts.0 image:/c* - if None: - self.rsh(node, "opcontrol --reset") - 
else: - self.oprofileStop(node) - self.oprofileStart(node) - - def oprofileStop(self, node=None): - if not node: - for n in self.Env["oprofile"]: - self.oprofileStop(n) - - elif node in self.Env["oprofile"]: - self.debug("Stopping oprofile on %s" % node) - self.rsh(node, "opcontrol --reset") - self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null") - - def errorstoignore(self): - # At some point implement a more elegant solution that - # also produces a report at the end - """ Return a list of known error messages that should be ignored """ - return self.templates.get_patterns("BadNewsIgnore") - - def install_config(self, node): - if not self.ns.wait_for_node(node): - self.log("Node %s is not up." % node) - return None - - if not node in self.CIBsync and self.Env["ClobberCIB"]: - self.CIBsync[node] = 1 - self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*") - - # Only install the CIB on the first node, all the other ones will pick it up from there - if self.cib_installed == 1: - return None - - self.cib_installed = 1 - if self.Env["CIBfilename"] == None: - self.log("Installing Generated CIB on node %s" % (node)) - self.cib.install(node) - - else: - self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node)) - if self.rsh.copy(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)) != 0: - raise ValueError("Can not scp file to %s %d"%(node)) - - self.rsh(node, "chown " + BuildOptions.DAEMON_USER + " " + BuildOptions.CIB_DIR + "/cib.xml") - - def prepare(self): - '''Finish the Initialization process. Prepare to test...''' - - self.partitions_expected = 1 - for node in self.Env["nodes"]: - self.ShouldBeStatus[node] = "" - if self.Env["experimental-tests"]: - self.unisolate_node(node) - self.StataCM(node) - - def test_node_CM(self, node): - '''Report the status of the cluster manager on a given node''' - - watchpats = [ ] - watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)") - watchpats.append(self.templates["Pat:NonDC_started"] % node) - watchpats.append(self.templates["Pat:DC_started"] % node) - idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, [node], self.Env["LogWatcher"], "ClusterIdle") - idle_watch.set_watch() - - (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1) - - if not out: - out = "" - else: - out = out[0].strip() - - self.debug("Node %s status: '%s'" %(node, out)) - - if out.find('ok') < 0: - if self.ShouldBeStatus[node] == "up": - self.log( - "Node status for %s is %s but we think it should be %s" - % (node, "down", self.ShouldBeStatus[node])) - self.ShouldBeStatus[node] = "down" - return 0 - - if self.ShouldBeStatus[node] == "down": - self.log( - "Node status for %s is %s but we think it should be %s: %s" - % (node, "up", self.ShouldBeStatus[node], out)) - - self.ShouldBeStatus[node] = "up" - - # check the output first - because syslog-ng loses messages - if out.find('S_NOT_DC') != -1: - # Up and stable - return 2 - if out.find('S_IDLE') != -1: - # Up and stable - return 2 - - # fall back to syslog-ng and wait - if not idle_watch.look(): - # just up - self.debug("Warn: Node %s is unstable: %s" % (node, out)) - return 1 - - # Up and stable - return 2 - - # Is the node up or is the node down - def StataCM(self, node): - '''Report the status of the cluster manager on a given node''' - - if self.test_node_CM(node) > 0: - return 1 - return None - - # Being up and being stable is not the same question... 
- def node_stable(self, node): - '''Report the status of the cluster manager on a given node''' - - if self.test_node_CM(node) == 2: - return 1 - self.log("Warn: Node %s not stable" % (node)) - return None - - def partition_stable(self, nodes, timeout=None): - watchpats = [ ] - watchpats.append("Current ping state: S_IDLE") - watchpats.append(self.templates["Pat:DC_IDLE"]) - self.debug("Waiting for cluster stability...") - - if timeout == None: - timeout = self.Env["DeadTime"] - - if len(nodes) < 3: - self.debug("Cluster is inactive") - return 1 - - idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, nodes.split(), self.Env["LogWatcher"], "ClusterStable", timeout) - idle_watch.set_watch() - - for node in nodes.split(): - # have each node dump its current state - self.rsh(node, self.templates["StatusCmd"] % node, verbose=1) - - ret = idle_watch.look() - while ret: - self.debug(ret) - for node in nodes.split(): - if re.search(node, ret): - return 1 - ret = idle_watch.look() - - self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout)) - return None - - def cluster_stable(self, timeout=None, double_check=False): - partitions = self.find_partitions() - - for partition in partitions: - if not self.partition_stable(partition, timeout): - return None - - if double_check: - # Make sure we are really stable and that all resources, - # including those that depend on transient node attributes, - # are started if they were going to be - time.sleep(5) - for partition in partitions: - if not self.partition_stable(partition, timeout): - return None - - return 1 - - def is_node_dc(self, node, status_line=None): - rc = 0 - - if not status_line: - (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1) - - if out: - status_line = out[0].strip() - - if not status_line: - rc = 0 - elif status_line.find('S_IDLE') != -1: - rc = 1 - elif status_line.find('S_INTEGRATION') != -1: - rc = 1 - elif status_line.find('S_FINALIZE_JOIN') != -1: - rc = 1 - elif status_line.find('S_POLICY_ENGINE') != -1: - rc = 1 - elif status_line.find('S_TRANSITION_ENGINE') != -1: - rc = 1 - - return rc - - def active_resources(self, node): - (_, output) = self.rsh(node, "crm_resource -c", verbose=1) - resources = [] - for line in output: - if re.search("^Resource", line): - tmp = AuditResource(self, line) - if tmp.type == "primitive" and tmp.host == node: - resources.append(tmp.id) - return resources - - def ResourceLocation(self, rid): - ResourceNodes = [] - for node in self.Env["nodes"]: - if self.ShouldBeStatus[node] == "up": - - cmd = self.templates["RscRunning"] % (rid) - (rc, lines) = self.rsh(node, cmd) - - if rc == 127: - self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" 
% cmd) - for line in lines: - self.log("Output: "+line) - elif rc == 0: - ResourceNodes.append(node) - - return ResourceNodes - - def find_partitions(self): - ccm_partitions = [] - - for node in self.Env["nodes"]: - if self.ShouldBeStatus[node] == "up": - (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1) - - if not out: - self.log("no partition details for %s" % node) - continue - - partition = out[0].strip() - - if len(partition) > 2: - nodes = partition.split() - nodes.sort() - partition = ' '.join(nodes) - - found = 0 - for a_partition in ccm_partitions: - if partition == a_partition: - found = 1 - if found == 0: - self.debug("Adding partition from %s: %s" % (node, partition)) - ccm_partitions.append(partition) - else: - self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node)) - - else: - self.log("bad partition details for %s" % node) - else: - self.debug("Node %s is down... skipping" % node) - - self.debug("Found partitions: %s" % repr(ccm_partitions) ) - return ccm_partitions - - def HasQuorum(self, node_list): - # If we are auditing a partition, then one side will - # have quorum and the other not. - # So the caller needs to tell us which we are checking - # If no value for node_list is specified... assume all nodes - if not node_list: - node_list = self.Env["nodes"] - - for node in node_list: - if self.ShouldBeStatus[node] == "up": - (_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1) - quorum = quorum[0].strip() - - if quorum.find("1") != -1: - return 1 - elif quorum.find("0") != -1: - return 0 - else: - self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum) - - return 0 - - def Components(self): - complist = [] - common_ignore = [ - "Pending action:", - "(ERROR|error): crm_log_message_adv:", - "(ERROR|error): MSG: No message to dump", - "pending LRM operations at shutdown", - "Lost connection to the CIB manager", - "Connection to the CIB terminated...", - "Sending message to the CIB manager FAILED", - "Action A_RECOVER .* not supported", - "(ERROR|error): stonithd_op_result_ready: not signed on", - "pingd.*(ERROR|error): send_update: Could not send update", - "send_ipc_message: IPC Channel to .* is not connected", - "unconfirmed_actions: Waiting on .* unconfirmed actions", - "cib_native_msgready: Message pending on command channel", - r": Performing A_EXIT_1 - forcefully exiting ", - r"Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", - ] - - stonith_ignore = [ - r"Updating failcount for child_DoFencing", - r"error.*: Fencer connection failed \(will retry\)", - "pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.", - ] - - stonith_ignore.extend(common_ignore) - - ccm = Process(self, "ccm", pats = [ - "State transition .* S_RECOVERY", - "pacemaker-controld.*Action A_RECOVER .* not supported", - r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", - r"pacemaker-controld.*: Could not recover from internal error", - "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", - # these status numbers are likely wrong now - r"pacemaker-controld.*exited with status 2", - r"attrd.*exited with status 1", - r"cib.*exited with status 2", - -# Not if it was fenced -# "A new node joined the cluster", - -# "WARN: determine_online_status: Node .* is unclean", -# "Scheduling node .* for fencing", -# "Executing .* fencing operation", -# "tengine_stonith_callback: .*result=0", -# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE", -# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN", - "State transition S_STARTING -> S_PENDING", - ], badnews_ignore = common_ignore) - - based = Process(self, "pacemaker-based", pats = [ - "State transition .* S_RECOVERY", - "Lost connection to the CIB manager", - "Connection to the CIB manager terminated", - r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", - "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", - r"pacemaker-controld.*: Could not recover from internal error", - # these status numbers are likely wrong now - r"pacemaker-controld.*exited with status 2", - r"attrd.*exited with status 1", - ], badnews_ignore = common_ignore) - - execd = Process(self, "pacemaker-execd", pats = [ - "State transition .* S_RECOVERY", - "LRM Connection failed", - "pacemaker-controld.*I_ERROR.*lrm_connection_destroy", - "State transition S_STARTING -> S_PENDING", - r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", - r"pacemaker-controld.*: Could not recover from internal error", - # this status number is likely wrong now - r"pacemaker-controld.*exited with status 2", - ], badnews_ignore = common_ignore) - - controld = Process(self, "pacemaker-controld", - pats = [ -# "WARN: determine_online_status: Node .* is unclean", -# "Scheduling node .* for fencing", -# "Executing .* fencing operation", -# "tengine_stonith_callback: .*result=0", - "State transition .* S_IDLE", - "State transition S_STARTING -> S_PENDING", - ], badnews_ignore = common_ignore) - - schedulerd = Process(self, "pacemaker-schedulerd", pats = [ - "State transition .* S_RECOVERY", - r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", - r"pacemaker-controld.*: Could not recover from internal error", - r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed", - "pacemaker-controld.*I_ERROR.*save_cib_contents", - # this status number is likely wrong now - r"pacemaker-controld.*exited with status 2", - ], badnews_ignore = common_ignore, dc_only=True) - - if self.Env["DoFencing"]: - complist.append(Process(self, "stoniths", dc_pats = [ - r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed", - "Attempting connection to fencing daemon", - ], badnews_ignore = stonith_ignore)) - - ccm.pats.extend([ - # these status numbers are likely wrong now - r"attrd.*exited with status 1", - r"pacemaker-(based|controld).*exited with status 2", - ]) - based.pats.extend([ - # these status numbers are likely wrong now - r"attrd.*exited with 
status 1", - r"pacemaker-controld.*exited with status 2", - ]) - execd.pats.extend([ - # these status numbers are likely wrong now - r"pacemaker-controld.*exited with status 2", - ]) - - complist.append(ccm) - complist.append(based) - complist.append(execd) - complist.append(controld) - complist.append(schedulerd) - - return complist - - def StandbyStatus(self, node): - (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1) - if not out: - return "off" - out = out[0].strip() - self.debug("Standby result: "+out) - return out - - # status == "on" : Enter Standby mode - # status == "off": Enter Active mode - def SetStandbyMode(self, node, status): - current_status = self.StandbyStatus(node) - cmd = self.templates["StandbyCmd"] % (node, status) - self.rsh(node, cmd) - return True - - def AddDummyRsc(self, node, rid): - rsc_xml = """ ' - - - - - '""" % (rid, rid) - constraint_xml = """ ' - - ' - """ % (rid, node, node, rid) - - self.rsh(node, self.templates['CibAddXml'] % (rsc_xml)) - self.rsh(node, self.templates['CibAddXml'] % (constraint_xml)) - - def RemoveDummyRsc(self, node, rid): - constraint = "\"//rsc_location[@rsc='%s']\"" % (rid) - rsc = "\"//primitive[@id='%s']\"" % (rid) - - self.rsh(node, self.templates['CibDelXpath'] % constraint) - self.rsh(node, self.templates['CibDelXpath'] % rsc) diff --git a/cts/lab/Makefile.am b/cts/lab/Makefile.am deleted file mode 100644 index 27e39b3..0000000 --- a/cts/lab/Makefile.am +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright 2001-2023 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# or later (GPLv2+) WITHOUT ANY WARRANTY. -# - -MAINTAINERCLEANFILES = Makefile.in - -noinst_SCRIPTS = cluster_test \ - OCFIPraTest.py - -# Commands intended to be run only via other commands -halibdir = $(CRM_DAEMON_DIR) -dist_halib_SCRIPTS = cts-log-watcher - -ctslibdir = $(pythondir)/cts -ctslib_PYTHON = __init__.py \ - CIB.py \ - cib_xml.py \ - ClusterManager.py \ - CM_corosync.py \ - CTSaudits.py \ - CTSscenarios.py \ - CTStests.py - -ctsdir = $(datadir)/$(PACKAGE)/tests/cts -cts_SCRIPTS = CTSlab.py \ - cts diff --git a/cts/lab/OCFIPraTest.py.in b/cts/lab/OCFIPraTest.py.in deleted file mode 100644 index 2cce304..0000000 --- a/cts/lab/OCFIPraTest.py.in +++ /dev/null @@ -1,173 +0,0 @@ -#!@PYTHON@ - -'''OCF IPaddr/IPaddr2 Resource Agent Test''' - -__copyright__ = """Original Author: Huang Zhen -Copyright 2004 International Business Machines - -with later changes copyright 2005-2023 the Pacemaker project contributors. -The version control history for this file may have further details. 
-""" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import os -import sys -import time -import random -import struct -import syslog - -from pacemaker import BuildOptions - - -def usage(): - print("usage: " + sys.argv[0] \ - + " [-2]"\ - + " [--ipbase|-i first-test-ip]"\ - + " [--ipnum|-n test-ip-num]"\ - + " [--help|-h]"\ - + " [--perform|-p op]"\ - + " [number-of-iterations]") - sys.exit(1) - - -def perform_op(ra, ip, op): - os.environ["OCF_RA_VERSION_MAJOR"] = "1" - os.environ["OCF_RA_VERSION_MINOR"] = "0" - os.environ["OCF_ROOT"] = BuildOptions.OCF_ROOT_DIR - os.environ["OCF_RESOURCE_INSTANCE"] = ip - os.environ["OCF_RESOURCE_TYPE"] = ra - os.environ["OCF_RESKEY_ip"] = ip - os.environ["HA_LOGFILE"] = "/dev/null" - os.environ["HA_LOGFACILITY"] = "local7" - path = BuildOptions.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra - return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ) - - -def audit(ra, iplist, ipstatus, summary): - passed = 1 - for ip in iplist: - ret = perform_op(ra, ip, "monitor") - if ret != ipstatus[ip]: - passed = 0 - log("audit: status of %s should be %d but it is %d\t [failure]" % - (ip,ipstatus[ip],ret)) - ipstatus[ip] = ret - summary["audit"]["called"] += 1; - if passed : - summary["audit"]["success"] += 1 - else : - summary["audit"]["failure"] += 1 - - -def log(towrite): - t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time())) - logstr = t + " "+str(towrite) - syslog.syslog(logstr) - print(logstr) - -if __name__ == '__main__': - ra = "IPaddr" - ipbase = "127.0.0.10" - ipnum = 1 - itnum = 50 - perform = None - summary = { - "start":{"called":0,"success":0,"failure":0}, - "stop" :{"called":0,"success":0,"failure":0}, - "audit":{"called":0,"success":0,"failure":0} - } - syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7) - - # Process arguments... 
- skipthis = None - args = sys.argv[1:] - for i in range(0, len(args)) : - if skipthis : - skipthis = None - continue - elif args[i] == "-2" : - ra = "IPaddr2" - elif args[i] == "--ip" or args[i] == "-i" : - skipthis = 1 - ipbase = args[i+1] - elif args[i] == "--ipnum" or args[i] == "-n" : - skipthis = 1 - ipnum = int(args[i+1]) - elif args[i] == "--perform" or args[i] == "-p" : - skipthis = 1 - perform = args[i+1] - elif args[i] == "--help" or args[i] == "-h" : - usage() - else: - itnum = int(args[i]) - - log("Begin OCF IPaddr/IPaddr2 Test") - - # Generate the test ips - iplist = [] - ipstatus = {} - fields = ipbase.split('.') - for i in range(0, ipnum) : - ip = fields.join('.') - iplist.append(ip) - ipstatus[ip] = perform_op(ra,ip,"monitor") - fields[3] = str(int(fields[3])+1) - log("Test ip:" + str(iplist)) - - # If use ask perform an operation - if perform != None: - log("Perform opeartion %s"%perform) - for ip in iplist: - perform_op(ra, ip, perform) - log("Done") - sys.exit() - - log("RA Type:" + ra) - log("Test Count:" + str(itnum)) - - # Prepare Random - f = open("/dev/urandom", "r") - seed = struct.unpack("BBB", f.read(3)) - f.close() - #seed=(123,321,231) - rand = random.Random() - rand.seed(seed[0]) - log("Test Random Seed:" + str(seed)) - - # - # Begin Tests - - log(">>>>>>>>>>>>>>>>>>>>>>>>") - for i in range(0, itnum): - ip = rand.choice(iplist) - if ipstatus[ip] == 0: - op = "stop" - elif ipstatus[ip] == 7: - op = "start" - else : - op = rand.choice(["start","stop"]) - - ret = perform_op(ra, ip, op) - # update status - if op == "start" and ret == 0: - ipstatus[ip] = 0 - elif op == "stop" and ret == 0: - ipstatus[ip] = 7 - else : - ipstatus[ip] = 1 - result = "" - if ret == 0: - result = "success" - else : - result = "failure" - summary[op]["called"] += 1 - summary[op][result] += 1 - log( "%d:%s %s \t[%s]"%(i, op, ip, result)) - audit(ra, iplist, ipstatus, summary) - - log("<<<<<<<<<<<<<<<<<<<<<<<<") - log("start:\t" + str(summary["start"])) - log("stop: \t" + str(summary["stop"])) - log("audit:\t" + str(summary["audit"])) - diff --git a/cts/lab/__init__.py b/cts/lab/__init__.py deleted file mode 100644 index abed502..0000000 --- a/cts/lab/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Python modules for Pacemaker's Cluster Test Suite (CTS) - -This package provides the following modules: - -CIB -cib_xml -CM_common -CM_corosync -CTSaudits -CTS -CTSscenarios -CTStests -patterns -watcher -""" diff --git a/cts/lab/cib_xml.py b/cts/lab/cib_xml.py deleted file mode 100644 index 378dd29..0000000 --- a/cts/lab/cib_xml.py +++ /dev/null @@ -1,319 +0,0 @@ -""" CIB XML generator for Pacemaker's Cluster Test Suite (CTS) -""" - -__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import sys - -from cts.CIB import CibBase - - -class XmlBase(CibBase): - def __init__(self, Factory, tag, _id, **kwargs): - CibBase.__init__(self, Factory, tag, _id, **kwargs) - - def show(self): - text = '''<%s''' % self.tag - if self.name: - text += ''' id="%s"''' % (self.name) - for k in list(self.kwargs.keys()): - text += ''' %s="%s"''' % (k, self.kwargs[k]) - - if not self.children: - text += '''/>''' - return text - - text += '''>''' - - for c in self.children: - text += c.show() - - text += '''''' % self.tag - return text - - def _run(self, operation, xml, section="all", options=""): - if self.name: - label = self.name - else: - label = "<%s>" % self.tag - self.Factory.debug("Writing out %s" 
% label) - fixed = "HOME=/root CIB_file="+self.Factory.tmpfile - fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml) - (rc, _) = self.Factory.rsh(self.Factory.target, fixed) - if rc != 0: - self.Factory.log("Configure call failed: "+fixed) - sys.exit(1) - - -class InstanceAttributes(XmlBase): - """ Create an section with name-value pairs """ - - def __init__(self, Factory, name, attrs): - XmlBase.__init__(self, Factory, "instance_attributes", name) - - # Create an for each attribute - for (attr, value) in list(attrs.items()): - self.add_child(XmlBase(Factory, "nvpair", "%s-%s" % (name, attr), - name=attr, value=value)) - - -class Node(XmlBase): - """ Create a section with node attributes for one node """ - - def __init__(self, Factory, node_name, node_id, node_attrs): - XmlBase.__init__(self, Factory, "node", node_id, uname=node_name) - self.add_child(InstanceAttributes(Factory, "%s-1" % node_name, node_attrs)) - - -class Nodes(XmlBase): - """ Create a section """ - - def __init__(self, Factory): - XmlBase.__init__(self, Factory, "nodes", None) - - def add_node(self, node_name, node_id, node_attrs): - self.add_child(Node(self.Factory, node_name, node_id, node_attrs)) - - def commit(self): - self._run("modify", self.show(), "configuration", "--allow-create") - - -class FencingTopology(XmlBase): - def __init__(self, Factory): - XmlBase.__init__(self, Factory, "fencing-topology", None) - - def level(self, index, target, devices, target_attr=None, target_value=None): - # Generate XML ID (sanitizing target-by-attribute levels) - - if target: - xml_id = "cts-%s.%d" % (target, index) - self.add_child(XmlBase(self.Factory, "fencing-level", xml_id, target=target, index=index, devices=devices)) - - else: - xml_id = "%s-%s.%d" % (target_attr, target_value, index) - child = XmlBase(self.Factory, "fencing-level", xml_id, index=index, devices=devices) - child["target-attribute"]=target_attr - child["target-value"]=target_value - self.add_child(child) - - def commit(self): - self._run("create", self.show(), "configuration", "--allow-create") - - -class Option(XmlBase): - def __init__(self, Factory, section="cib-bootstrap-options"): - XmlBase.__init__(self, Factory, "cluster_property_set", section) - - def __setitem__(self, key, value): - self.add_child(XmlBase(self.Factory, "nvpair", "cts-%s" % key, name=key, value=value)) - - def commit(self): - self._run("modify", self.show(), "crm_config", "--allow-create") - - -class OpDefaults(XmlBase): - def __init__(self, Factory): - XmlBase.__init__(self, Factory, "op_defaults", None) - self.meta = XmlBase(self.Factory, "meta_attributes", "cts-op_defaults-meta") - self.add_child(self.meta) - - def __setitem__(self, key, value): - self.meta.add_child(XmlBase(self.Factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value)) - - def commit(self): - self._run("modify", self.show(), "configuration", "--allow-create") - - -class Alerts(XmlBase): - def __init__(self, Factory): - XmlBase.__init__(self, Factory, "alerts", None) - self.alert_count = 0 - - def add_alert(self, path, recipient): - self.alert_count = self.alert_count + 1 - alert = XmlBase(self.Factory, "alert", "alert-%d" % self.alert_count, - path=path) - recipient1 = XmlBase(self.Factory, "recipient", - "alert-%d-recipient-1" % self.alert_count, - value=recipient) - alert.add_child(recipient1) - self.add_child(alert) - - def commit(self): - self._run("modify", self.show(), "configuration", "--allow-create") - - -class Expression(XmlBase): - def 
__init__(self, Factory, name, attr, op, value=None): - XmlBase.__init__(self, Factory, "expression", name, attribute=attr, operation=op) - if value: - self["value"] = value - - -class Rule(XmlBase): - def __init__(self, Factory, name, score, op="and", expr=None): - XmlBase.__init__(self, Factory, "rule", "%s" % name) - self["boolean-op"] = op - self["score"] = score - if expr: - self.add_child(expr) - - -class Resource(XmlBase): - def __init__(self, Factory, name, rtype, standard, provider=None): - XmlBase.__init__(self, Factory, "native", name) - - self.rtype = rtype - self.standard = standard - self.provider = provider - - self.op = [] - self.meta = {} - self.param = {} - - self.scores = {} - self.needs = {} - self.coloc = {} - - if self.standard == "ocf" and not provider: - self.provider = "heartbeat" - elif self.standard == "lsb": - self.provider = None - - def __setitem__(self, key, value): - self.add_param(key, value) - - def add_op(self, name, interval, **kwargs): - self.op.append( - XmlBase(self.Factory, "op", "%s-%s" % (name, interval), name=name, interval=interval, **kwargs)) - - def add_param(self, name, value): - self.param[name] = value - - def add_meta(self, name, value): - self.meta[name] = value - - def prefer(self, node, score="INFINITY", rule=None): - if not rule: - rule = Rule(self.Factory, "prefer-%s-r" % node, score, - expr=Expression(self.Factory, "prefer-%s-e" % node, "#uname", "eq", node)) - self.scores[node] = rule - - def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs): - kargs = kwargs.copy() - kargs["kind"] = kind - if then: - kargs["first-action"] = "start" - kargs["then-action"] = then - - if first: - kargs["first-action"] = first - - self.needs[resource] = kargs - - def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs): - kargs = kwargs.copy() - kargs["score"] = score - if role: - kargs["rsc-role"] = role - if withrole: - kargs["with-rsc-role"] = withrole - - self.coloc[resource] = kargs - - def constraints(self): - text = "" - - for k in list(self.scores.keys()): - text += '''''' % (k, self.name) - text += self.scores[k].show() - text += '''''' - - for k in list(self.needs.keys()): - text += '''''' - - for k in list(self.coloc.keys()): - text += '''''' - - text += "" - return text - - def show(self): - text = '''''' - - if len(self.meta) > 0: - text += '''''' % self.name - for p in list(self.meta.keys()): - text += '''''' % (self.name, p, p, self.meta[p]) - text += '''''' - - if len(self.param) > 0: - text += '''''' % self.name - for p in list(self.param.keys()): - text += '''''' % (self.name, p, p, self.param[p]) - text += '''''' - - if len(self.op) > 0: - text += '''''' - for o in self.op: - key = o.name - o.name = "%s-%s" % (self.name, key) - text += o.show() - o.name = key - text += '''''' - - text += '''''' - return text - - def commit(self): - self._run("create", self.show(), "resources") - self._run("modify", self.constraints()) - - -class Group(Resource): - def __init__(self, Factory, name): - Resource.__init__(self, Factory, name, None, None) - self.tag = "group" - - def __setitem__(self, key, value): - self.add_meta(key, value) - - def show(self): - text = '''<%s id="%s">''' % (self.tag, self.name) - - if len(self.meta) > 0: - text += '''''' % self.name - for p in list(self.meta.keys()): - text += '''''' % (self.name, p, p, self.meta[p]) - text += '''''' - - for c in self.children: - text += c.show() - text += '''''' % self.tag - return text - - -class Clone(Group): - def __init__(self, 
Factory, name, child=None): - Group.__init__(self, Factory, name) - self.tag = "clone" - if child: - self.add_child(child) - - def add_child(self, resource): - if not self.children: - self.children.append(resource) - else: - self.Factory.log("Clones can only have a single child. Ignoring %s" % resource.name) diff --git a/cts/lab/cluster_test.in b/cts/lab/cluster_test.in deleted file mode 100755 index 1741b47..0000000 --- a/cts/lab/cluster_test.in +++ /dev/null @@ -1,175 +0,0 @@ -#!@BASH_PATH@ -# -# Copyright 2008-2020 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# or later (GPLv2+) WITHOUT ANY WARRANTY. -# -if [ -e ~/.cts ]; then - . ~/.cts -fi -anyAsked=0 - -[ $# -lt 1 ] || CTS_numtests=$1 - -die() { echo "$@"; exit 1; } - -if [ -z "$CTS_asked_once" ]; then - anyAsked=1 - echo "This script should only be executed on the test exerciser." - echo "The test exerciser will remotely execute the actions required by the" - echo "tests and should not be part of the cluster itself." - - read -p "Is this host intended to be the test exerciser? (yN) " doUnderstand - [ "$doUnderstand" = "y" ] \ - || die "This script must be executed on the test exerciser" -fi - -if [ -z "$CTS_node_list" ]; then - anyAsked=1 - read -p "Please list your cluster nodes (eg. node1 node2 node3): " CTS_node_list -else - echo "Beginning test of cluster: $CTS_node_list" -fi - -if [ -z "$CTS_stack" ]; then - anyAsked=1 - read -p "Which cluster stack are you using? ([corosync]): " CTS_stack - [ -n "$CTS_stack" ] || CTS_stack=corosync -else - echo "Using the $CTS_stack cluster stack" -fi - -[ "${CTS_node_list}" = "${CTS_node_list/$HOSTNAME/}" ] \ - || die "This script must be executed on the test exerciser, and the test exerciser cannot be part of the cluster" - -printf "+ Bootstrapping ssh... " -if [ -z "$SSH_AUTH_SOCK" ]; then - printf "\n + Initializing SSH " - eval "$(ssh-agent)" - echo " + Adding identities..." - ssh-add - rc=$? - if [ $rc -ne 0 ]; then - echo " -- No identities added" - printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" - - read -p " - Do you want this program to help you create one? (yN) " auto_fix - if [ "$auto_fix" = "y" ]; then - ssh-keygen -t dsa - ssh-add - else - die "Please run 'ssh-keygen -t dsa' to create a new key" - fi - fi -else - echo "OK" -fi - -test_ok=1 -printf "+ Testing ssh configuration... " -for n in $CTS_node_list; do - ssh -l root -o PasswordAuthentication=no -o ConnectTimeout=5 "$n" /bin/true - rc=$? - if [ $rc -ne 0 ]; then - echo " - connection to $n failed" - test_ok=0 - fi -done - -if [ $test_ok -eq 0 ]; then - printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" - - read -p " - Do you want this program to help you with such a setup? (yN) " auto_fix - if [ "$auto_fix" = "y" ]; then - # XXX are we picking the most suitable identity? 
- privKey=$(ssh-add -L | head -n1 | cut -d" " -f3) - sshCopyIdOpts="-o User=root" - [ -z "$privKey" ] || sshCopyIdOpts+=" -i \"${privKey}.pub\"" - for n in $CTS_node_list; do - eval "ssh-copy-id $sshCopyIdOpts \"${n}\"" \ - || die "Attempt to 'ssh-copy-id $sshCopyIdOpts \"$n\"' failed" - done - else - die "Please install one of your SSH public keys to root's account on all cluster nodes" - fi -fi -echo "OK" - -if [ -z "$CTS_logfile" ]; then - anyAsked=1 - read -p " + Where does/should syslog store logs from remote hosts? (/var/log/messages) " CTS_logfile - [ -n "$CTS_logfile" ] || CTS_logfile=/var/log/messages -fi - -[ -e "$CTS_logfile" ] || die "$CTS_logfile doesn't exist" - -if [ -z "$CTS_logfacility" ]; then - anyAsked=1 - read -p " + Which log facility does the cluster use? (daemon) " CTS_logfacility - [ -n "$CTS_logfacility" ] || CTS_logfacility=daemon -fi - -if [ -z "$CTS_boot" ]; then - read -p "+ Is the cluster software started automatically when a node boots? [yN] " CTS_boot - if [ -z "$CTS_boot" ]; then - CTS_boot=0 - else - case $CTS_boot in - 1|y|Y) CTS_boot=1;; - *) CTS_boot=0;; - esac - fi -fi - -if [ -z "$CTS_numtests" ]; then - read -p "+ How many test iterations should be performed? (500) " CTS_numtests - [ -n "$CTS_numtests" ] || CTS_numtests=500 -fi - -if [ -z "$CTS_asked_once" ]; then - anyAsked=1 - read -p "+ What type of STONITH agent do you use? (none) " CTS_stonith - [ -z "$CTS_stonith" ] \ - || read -p "+ List any STONITH agent parameters (eq. device_host=switch.power.com): " CTS_stonith_args - [ -n "$CTS_adv" ] \ - || read -p "+ (Advanced) Any extra CTS parameters? (none) " CTS_adv -fi - -[ $anyAsked -eq 0 ] \ - || read -p "+ Save values to ~/.cts for next time? (yN) " doSave - -if [ "$doSave" = "y" ]; then - cat > ~/.cts <<-EOF - # CTS Test data - CTS_stack="$CTS_stack" - CTS_node_list="$CTS_node_list" - CTS_logfile="$CTS_logfile" - CTS_logport="$CTS_logport" - CTS_logfacility="$CTS_logfacility" - CTS_asked_once=1 - CTS_adv="$CTS_adv" - CTS_stonith="$CTS_stonith" - CTS_stonith_args="$CTS_stonith_args" - CTS_boot="$CTS_boot" -EOF -fi - -cts_extra="" -if [ -n "$CTS_stonith" ]; then - cts_extra="$cts_extra --stonith-type $CTS_stonith" - [ -z "$CTS_stonith_args" ] \ - || cts_extra="$cts_extra --stonith-params \"$CTS_stonith_args\"" -else - cts_extra="$cts_extra --stonith 0" - echo " - Testing a cluster without STONITH is like a blunt pencil... pointless" -fi - -printf "\nAll set to go for %d iterations!\n" "$CTS_numtests" -[ $anyAsked -ne 0 ] \ - || echo "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually)." 
- -echo Now paste the following command into this shell: -echo "@PYTHON@ `dirname "$0"`/CTSlab.py -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\"" diff --git a/cts/lab/cts-log-watcher.in b/cts/lab/cts-log-watcher.in deleted file mode 100644 index cee9c94..0000000 --- a/cts/lab/cts-log-watcher.in +++ /dev/null @@ -1,84 +0,0 @@ -#!@PYTHON@ -""" Remote log reader for Pacemaker's Cluster Test Suite (CTS) - -Reads a specified number of lines from the supplied offset -Returns the current offset -Contains logic for handling truncation -""" - -__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" - -import sys -import os -import fcntl - -if __name__ == '__main__': - - limit = 0 - offset = 0 - prefix = '' - filename = '/var/log/messages' - - skipthis=None - args=sys.argv[1:] - for i in range(0, len(args)): - if skipthis: - skipthis=None - continue - - elif args[i] == '-l' or args[i] == '--limit': - skipthis=1 - limit = int(args[i+1]) - - elif args[i] == '-f' or args[i] == '--filename': - skipthis=1 - filename = args[i+1] - - elif args[i] == '-o' or args[i] == '--offset': - skipthis=1 - offset = args[i+1] - - elif args[i] == '-p' or args[i] == '--prefix': - skipthis=1 - prefix = args[i+1] - - elif args[i] == '-t' or args[i] == '--tag': - skipthis=1 - - if not os.access(filename, os.R_OK): - print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)) - sys.exit(1) - - logfile=open(filename, 'r') - logfile.seek(0, os.SEEK_END) - newsize=logfile.tell() - - if offset != 'EOF': - offset = int(offset) - if newsize >= offset: - logfile.seek(offset) - else: - print(prefix + ('File truncated from %d to %d' % (offset, newsize))) - if (newsize*1.05) < offset: - logfile.seek(0) - # else: we probably just lost a few logs after a fencing op - # continue from the new end - # TODO: accept a timestamp and discard all messages older than it - - # Don't block when we reach EOF - fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) - - count = 0 - while True: - if logfile.tell() >= newsize: break - elif limit and count >= limit: break - - line = logfile.readline() - if not line: break - - print(line.strip()) - count += 1 - - print(prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)) - logfile.close() diff --git a/cts/lab/cts.in b/cts/lab/cts.in deleted file mode 100755 index 5b3aaab..0000000 --- a/cts/lab/cts.in +++ /dev/null @@ -1,262 +0,0 @@ -#!@BASH_PATH@ -# -# Copyright 2012-2023 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# or later (GPLv2+) WITHOUT ANY WARRANTY. -# - -# e.g. /etc/sysconfig or /etc/default -CONFIG_DIR=@CONFIGDIR@ - -cts_root=`dirname $0` - -logfile=0 -summary=0 -verbose=0 -watch=0 -saved=0 -tests="" - -install=0 -clean=0 -kill=0 -run=0 -boot=0 -setup=0 -target=rhel-7 -cmd="" -trace="" - -custom_log="" -patterns="-e CTS:" - - -helpmsg=$(cat </dev/null -if [ $? != 0 ]; then - echo $0 needs the cluster-helper script to be in your path - exit 1 -fi - -which cluster-clean &>/dev/null -if [ $? 
!= 0 ]; then - echo $0 needs the cluster-clean script to be in your path - exit 1 -fi - -if [ "x$cluster_name" = x ] || [ "x$cluster_name" = xpick ]; then - clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' ` - - echo "custom) interactively define a cluster" - for i in $clusters; do - echo "$i) `cluster-helper --list short -g $i`" - done - - read -p "Choose a cluster [custom]: " cluster_name - echo -fi - -if [ -z $cluster_name ]; then - cluster_name=custom -fi - - -case $cluster_name in - custom) - read -p "Cluster name: " cluster_name - read -p "Cluster hosts: " cluster_hosts - read -p "Cluster log file: " cluster_log - cluster-helper add -g "$cluster_name" -w "$cluster_hosts" - ;; - *) - cluster_hosts=`cluster-helper --list short -g $cluster_name` - cluster_log=~/cluster-$cluster_name.log; - ;; -esac - -if [ x$cmd != x ]; then - config="${CONFIG_DIR}/pacemaker" - case $cmd in - trace-ls|tls) - cluster-helper -g $cluster_name -- grep PCMK_trace_functions $config - ;; - trace-add|tadd) - echo "Adding $trace to PCMK_trace_functions" - cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=/PCMK_trace_functions=$trace,/" $config - ;; - trace-rm|trm) - echo "Removing $trace from PCMK_trace_functions" - cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=\\\\\\(.*\\\\\\)$trace,\\\\\\(.*\\\\\\)/PCMK_trace_functions=\\\\\\1\\\\\\2/" $config - ;; - trace-set|tset) - echo "Setting PCMK_trace_functions to '$trace'" - cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions.*/PCMK_trace_functions=$trace/" $config - ;; - esac - exit 0 -fi - -if [ $run = 1 ]; then - install=1 - clean=1 -fi - -if [ $clean = 1 ]; then - rm -f $cluster_log; cluster-clean -g $cluster_name --kill -elif [ $kill = 1 ]; then - cluster-clean -g $cluster_name --kill-only - exit 0 -fi - -if [ $install = 1 ]; then - cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo -fi - -if [ $setup = 1 ]; then - cluster-init -g $cluster_name $target -u --test - exit 0 - -elif [ $boot = 1 ]; then - $cts_root/CTSlab.py -r -c -g $cluster_name --boot - rc=$? - if [ $rc = 0 ]; then - echo "The cluster is ready..." - fi - exit $rc - -elif [ $run = 1 ]; then - $cts_root/CTSlab.py -r -c -g $cluster_name 500 "$@" - exit $? - -elif [ $clean = 1 ]; then - exit 0 -fi - -screen -ls | grep cts-$cluster_name &>/dev/null -active=$? - -if [ ! -z $custom_log ]; then - cluster_log=$custom_log -fi - -if [ "x$tests" != x ] && [ "x$tests" != "x " ]; then - for t in $tests; do - echo "crm_report --cts-log $cluster_log -d -T $t" - crm_report --cts-log $cluster_log -d -T $t - done - -elif [ $logfile = 1 ]; then - echo $cluster_log - -elif [ $summary = 1 ]; then - files=$cluster_log - if [ $saved = 1 ]; then - files=`ls -1tr ~/CTS-*/cluster-log.txt` - fi - for f in $files; do - echo $f - case $verbose in - 0) cat -n $f | grep $patterns | grep -v "CTS: debug:" - ;; - 1) cat -n $f | grep $patterns | grep -v "CTS:.* cmd:" - ;; - *) cat -n $f | grep $patterns - ;; - esac - echo "" - done - -elif [ $watch = 1 ]; then - case $verbose in - 0) tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:" - ;; - 1) tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:" - ;; - *) tail -F $cluster_log | grep $patterns - ;; - esac - -elif [ $active = 0 ]; then - screen -x cts-$cluster_name - -else - touch $cluster_log - -# . 
~/.bashrc - export cluster_name cluster_hosts cluster_log - screen -S cts-$cluster_name bash -fi diff --git a/cts/lxc_autogen.sh.in b/cts/lxc_autogen.sh.in deleted file mode 100644 index 195d3f9..0000000 --- a/cts/lxc_autogen.sh.in +++ /dev/null @@ -1,545 +0,0 @@ -#!@BASH_PATH@ -# -# Copyright 2013-2022 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# or later (GPLv2+) WITHOUT ANY WARRANTY. -# - -containers="2" -download=0 -share_configs=0 -# different than default libvirt network in case this is run nested in a KVM instance -addr="192.168.123.1" -restore=0 -restore_pcmk=0 -restore_all=0 -generate=0 -key_gen=0 -cib=0 -anywhere=0 -add_clone=0 -verify=0 -working_dir="@CRM_PACEMAKER_DIR@/cts/lxc" -run_dirs="/run /var/run /usr/var/run" - -# must be on one line b/c used inside quotes -SSH_RSYNC_OPTS="-o UserKnownHostsFile=/dev/null -o BatchMode=yes -o StrictHostKeyChecking=no" - -function helptext() { - echo "lxc_autogen.sh - generate libvirt LXC containers for testing purposes" - echo "" - echo "Usage: lxc-autogen [options]" - echo "" - echo "Options:" - echo "-g, --generate Generate libvirt LXC environment in directory this script is run from" - echo "-k, --key-gen Generate Pacemaker Remote key only" - echo "-r, --restore-libvirt Restore the default network and libvirt config to before this script ran" - echo "-p, --restore-cib Remove CIB entries this script generated" - echo "-R, --restore-all Restore both libvirt and CIB, and clean working directory" - echo " (libvirt xml files are not removed, so resource can be stopped properly)" - echo "" - echo "-A, --allow-anywhere Allow the containers to live anywhere in the cluster" - echo "-a, --add-cib Add CIB entries to create a guest node for each LXC instance" - echo "-C, --add-clone Add promotable clone resource shared between LXC guest nodes" - echo "-d, --download-agent Download and install latest VirtualDomain agent" - echo "-s, --share-configs Synchronize on all known cluster nodes" - echo "-c, --containers Specify number of containers to generate (default $containers; used with -g)" - echo "-n, --network Network to override libvirt default (example: -n 192.168.123.1; used with -g)" - echo "-v, --verify Verify environment is capable of running LXC" - echo "" - exit "$1" -} - -while true ; do - case "$1" in - --help|-h|-\?) helptext 0;; - -c|--containers) containers="$2"; shift; shift;; - -d|--download-agent) download=1; shift;; - -s|--share-configs) share_configs=1; shift;; - -n|--network) addr="$2"; shift; shift;; - -r|--restore-libvirt) restore=1; shift;; - -p|--restore-cib) restore_pcmk=1; shift;; - -R|--restore-all) - restore_all=1 - restore=1 - restore_pcmk=1 - shift;; - -g|--generate) generate=1; key_gen=1; shift;; - -k|--key-gen) key_gen=1; shift;; - -a|--add-cib) cib=1; shift;; - -A|--allow-anywhere) anywhere=1; shift;; - -C|--add-clone) add_clone=1; shift;; - -m|--add-master) - echo "$1 is deprecated (use -C/--add-clone instead)" - echo - add_clone=1 - shift - ;; - -v|--verify) verify=1; shift;; - "") break;; - *) helptext 1;; - esac -done - -if [ $verify -eq 1 ]; then - # verify virsh tool is available and that - # we can connect to lxc driver. - virsh -c lxc:/// list --all > /dev/null 2>&1 - if [ $? 
-ne 0 ]; then - echo "libvirt LXC driver must be installed (could not connect 'virsh -c lxc:///')" - # yum install -y libvirt-daemon-driver-lxc libvirt-daemon-lxc libvirt-login-shell - exit 1 - fi - - SELINUX=$(getenforce) - if [ "$SELINUX" != "Enforcing" ] && [ "$SELINUX" != "Permissive" ]; then - echo "SELINUX must be set to permissive or enforcing mode" - exit 1 - fi - - ps ax | grep "[l]ibvirtd" - if [ $? -ne 0 ]; then - echo "libvirtd must be running" - exit 1 - fi - - which rsync > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "rsync must be installed" - fi - - which pacemaker-remoted > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "pacemaker-remoted must be installed" - fi -fi - -#strip last digits off addr -addr="$(echo "$addr" | awk -F. '{print $1"."$2"."$3}')" - -node_exec() { - ssh -o StrictHostKeyChecking=no \ - -o ConnectTimeout=30 \ - -o BatchMode=yes \ - -l root -T "$@" -} - -this_node() -{ - crm_node -n -} - -other_nodes() -{ - crm_node -l | awk "\$2 != \"$(this_node)\" {print \$2}" -} - -make_directory() -{ - # argument must be full path - DIR="$1" - - mkdir -p "$DIR" - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - node_exec "$node" mkdir -p "$DIR" - done - fi -} - -sync_file() -{ - TARGET="$1" - - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - rsync -ave "ssh $SSH_RSYNC_OPTS" "$TARGET" "${node}:${TARGET}" - done - fi -} - -download_agent() -{ - wget https://raw.github.com/ClusterLabs/resource-agents/main/heartbeat/VirtualDomain - chmod 755 VirtualDomain - mv -f VirtualDomain /usr/lib/ocf/resource.d/heartbeat/VirtualDomain - sync_file /usr/lib/ocf/resource.d/heartbeat/VirtualDomain -} - -set_network() -{ - rm -f cur_network.xml - cat << END >> cur_network.xml - - default - 41ebdb84-7134-1111-a136-91f0f1119225 - - - - - - - - - -END - sync_file "${working_dir}"/cur_network.xml -} - -distribute_configs() -{ - for node in $(other_nodes); do - rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*.xml "${node}:${working_dir}" - rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*-filesystem "${node}:${working_dir}" - done -} - -start_network() -{ - NODE="$1" - - node_exec "$NODE" <<-EOF - cd "$working_dir" - virsh net-info default >/dev/null 2>&1 - if [ \$? -eq 0 ]; then - if [ ! -f restore_default.xml ]; then - virsh net-dumpxml default > restore_default.xml - fi - virsh net-destroy default - virsh net-undefine default - fi - virsh net-define cur_network.xml - virsh net-start default - virsh net-autostart default -EOF -} - -start_network_all() -{ - start_network "$(this_node)" - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - start_network "$node" - done - fi -} - -add_hosts_entry() -{ - IP="$1" - HNAME="$2" - - echo "$IP $HNAME" >>/etc/hosts - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - node_exec "$node" "echo $IP $HNAME >>/etc/hosts" - done - fi -} - -generate_key() -{ - if [ ! -e /etc/pacemaker/authkey ]; then - make_directory /etc/pacemaker - dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1 - sync_file /etc/pacemaker/authkey - fi -} - -generate() -{ - set_network - - # Generate libvirt domains in xml - for (( c=1; c <= containers; c++ )) - do - # Clean any previous definition - rm -rf "lxc$c.xml" "lxc$c-filesystem" - - # Create a basic filesystem with run directories - for dir in $run_dirs; do - mkdir -p "lxc$c-filesystem/$dir" - done - - # Create libvirt definition - suffix=$((10 + c)) - prefix="$(echo "$addr" | awk -F. 
'{print $1"."$2}')" - subnet="$(echo "$addr" | awk -F. '{print $3}')" - while [ $suffix -gt 255 ]; do - subnet=$((subnet + 1)) - suffix=$((subnet - 255)) - done - cip="$prefix.$subnet.$suffix" - - cat << END >> lxc$c.xml - - lxc$c - 200704 - - exe - $working_dir/lxc$c-filesystem/launch-helper - - - - - - - -END - for dir in $run_dirs; do - cat << END >> lxc$c.xml - - - - -END - done - cat << END >> lxc$c.xml - - - - - - -END - - # Create CIB definition - rm -f "container$c.cib" - cat << END >> "container$c.cib" - - - - - - - - - - - - - - - - - -END - - # Create container init - rm -f "lxc$c-filesystem/launch-helper" - cat << END >> "lxc$c-filesystem/launch-helper" -#!@BASH_PATH@ -ip -f inet addr add "$cip/24" dev eth0 -ip link set eth0 up -ip route add default via "$addr.1" -hostname "lxc$c" -df > "$working_dir/lxc$c-filesystem/disk_usage.txt" -export PCMK_debugfile="@CRM_LOG_DIR@/pacemaker_remote_lxc$c.log" -/usr/sbin/pacemaker-remoted -END - chmod 711 "lxc$c-filesystem/launch-helper" - - add_hosts_entry "$cip" "lxc$c" - done - - # Create CIB fragment for a promotable clone resource - cat << END > lxc-clone.cib - - - - - - - - - - - - - - -END -} - -container_names() { - find . -maxdepth 1 -name "lxc*.xml" -exec basename -s .xml "{}" ";" -} - -apply_cib_clone() -{ - cibadmin -Q > cur.cib - export CIB_file=cur.cib - - cibadmin -o resources -Mc -x lxc-clone.cib - for tmp in $(container_names); do - echo "" > tmp_constraint - cibadmin -o constraints -Mc -x tmp_constraint - done - # Make sure the version changes even if the content doesn't - cibadmin -B - unset CIB_file - - cibadmin --replace -o configuration --xml-file cur.cib - rm -f cur.cib -} - -apply_cib_entries() -{ - cibadmin -Q > cur.cib - export CIB_file=cur.cib - for tmp in container*.cib; do - cibadmin -o resources -Mc -x "$tmp" - - remote_node="$(grep remote-node "${tmp}" | sed -n -e 's/^.*value=\"\(.*\)\".*/\1/p')" - if [ $anywhere -eq 0 ]; then - crm_resource -M -r "${tmp//\.cib/}" -H "$(this_node)" - fi - echo "" > tmp_constraint - # Ignore any failure; this constraint is just to help with CTS when the - # connectivity resources (which fail the guest nodes) are in use. 
- cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1 - - for rsc in $(crm_resource -l | grep rsc_ ); do - echo "" > tmp_constraint - cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1 - done - - rm -f tmp_constraint - done - - # Make sure the version changes even if the content doesn't - cibadmin -B - - unset CIB_file - - cibadmin --replace -o configuration --xml-file cur.cib - rm -f cur.cib -} - -restore_cib() -{ - cibadmin -Q > cur.cib - export CIB_file=cur.cib - - for tmp in $(container_names); do - echo "" > tmp_constraint - cibadmin -o constraints -D -x tmp_constraint - echo "" > tmp_constraint - cibadmin -o constraints -D -x tmp_constraint - - for rsc in $(crm_resource -l | grep rsc_ ); do - echo "" > tmp_constraint - cibadmin -o constraints -D -x tmp_constraint - done - rm -f tmp_constraint - done - cibadmin -o resources -D -x lxc-clone.cib - - for tmp in container*.cib; do - tmp="${tmp//\.cib/}" - crm_resource -U -r "$tmp" -H "$(this_node)" - crm_resource -D -r "$tmp" -t primitive - done - # Make sure the version changes even if the content doesn't - cibadmin -B - unset CIB_file - - cibadmin --replace -o configuration --xml-file cur.cib - rm -f cur.cib - - # Allow the cluster to stabilize before continuing - crm_resource --wait - - # Purge nodes from caches and CIB status section - for tmp in $(container_names); do - crm_node --force --remove "$tmp" - done -} - -restore_network() -{ - NODE="$1" - - node_exec "$NODE" <<-EOF - cd "$working_dir" - for tmp in \$(ls lxc*.xml | sed -e 's/\.xml//g'); do - virsh -c lxc:/// destroy "\$tmp" >/dev/null 2>&1 - virsh -c lxc:/// undefine "\$tmp" >/dev/null 2>&1 - sed -i.bak "/...\....\....\..* \${tmp}/d" /etc/hosts - done - virsh net-destroy default >/dev/null 2>&1 - virsh net-undefine default >/dev/null 2>&1 - if [ -f restore_default.xml ]; then - virsh net-define restore_default.xml - virsh net-start default - rm restore_default.xml - fi -EOF - echo "Containers destroyed and default network restored on $NODE" -} - -restore_libvirt() -{ - restore_network "$(this_node)" - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - restore_network "$node" - done - fi -} - -restore_files() -{ - find . -maxdepth 1 -not -name "lxc*.xml" -a -not -name . 
-exec rm -rf "{}" ";" - if [ $share_configs -eq 1 ]; then - for node in $(other_nodes); do - node_exec "$node" rm -rf \ - "$working_dir"/lxc*-filesystem \ - "$working_dir"/cur_network.xml - done - fi -} - -make_directory "$working_dir" -cd "$working_dir" || exit 1 - -# Generate files as requested -if [ $download -eq 1 ]; then - download_agent -fi -if [ $key_gen -eq 1 ]; then - generate_key -fi -if [ $generate -eq 1 ]; then - generate -fi -if [ $share_configs -eq 1 ]; then - distribute_configs -fi -if [ $generate -eq 1 ]; then - start_network_all -fi - -# Update cluster as requested -if [ $cib -eq 1 ]; then - apply_cib_entries -fi -if [ $add_clone -eq 1 ]; then - apply_cib_clone -fi - -# Restore original state as requested -if [ $restore_pcmk -eq 1 ]; then - restore_cib -fi -if [ $restore -eq 1 ]; then - restore_libvirt -fi -if [ $restore_all -eq 1 ]; then - restore_files -fi - -# vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80: diff --git a/cts/scheduler/Makefile.am b/cts/scheduler/Makefile.am index 9074390..aed7714 100644 --- a/cts/scheduler/Makefile.am +++ b/cts/scheduler/Makefile.am @@ -10,9 +10,15 @@ MAINTAINERCLEANFILES = Makefile.in pedir = $(datadir)/$(PACKAGE)/tests/scheduler +.PHONY: list list: @for T in "$(srcdir)"/xml/*.xml; do \ echo $$(basename $$T .xml); \ done -SUBDIRS = dot exp scores stderr summary xml +SUBDIRS = dot \ + exp \ + scores \ + stderr \ + summary \ + xml diff --git a/cts/scheduler/dot/bug-lf-2422.dot b/cts/scheduler/dot/bug-lf-2422.dot index 72ad12a..6fe2208 100644 --- a/cts/scheduler/dot/bug-lf-2422.dot +++ b/cts/scheduler/dot/bug-lf-2422.dot @@ -68,9 +68,12 @@ "ocfs:2_stop_0 qa-suse-3" -> "c-ocfs_stopped_0" [ style = bold] "ocfs:2_stop_0 qa-suse-3" -> "o2stage:2_stop_0" [ style = bold] "ocfs:2_stop_0 qa-suse-3" -> "ocfs:0_stop_0 qa-suse-4" [ style = bold] +"ocfs:2_stop_0 qa-suse-3" -> "ocfs:1_stop_0 qa-suse-1" [ style = bold] "ocfs:2_stop_0 qa-suse-3" [ style=bold color="green" fontcolor="black"] "ocfs:3_stop_0 qa-suse-2" -> "c-ocfs_stopped_0" [ style = bold] "ocfs:3_stop_0 qa-suse-2" -> "o2stage:3_stop_0" [ style = bold] +"ocfs:3_stop_0 qa-suse-2" -> "ocfs:0_stop_0 qa-suse-4" [ style = bold] +"ocfs:3_stop_0 qa-suse-2" -> "ocfs:1_stop_0 qa-suse-1" [ style = bold] "ocfs:3_stop_0 qa-suse-2" -> "ocfs:2_stop_0 qa-suse-3" [ style = bold] "ocfs:3_stop_0 qa-suse-2" [ style=bold color="green" fontcolor="black"] "sbd_stonith_monitor_15000 qa-suse-2" [ style=bold color="green" fontcolor="black"] diff --git a/cts/scheduler/dot/bundle-interleave-start.dot b/cts/scheduler/dot/bundle-interleave-start.dot index bf6ed7f..109a6cb 100644 --- a/cts/scheduler/dot/bundle-interleave-start.dot +++ b/cts/scheduler/dot/bundle-interleave-start.dot @@ -41,9 +41,15 @@ "app-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] "app-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] "app-bundle-2_start_0 node4" -> "app-bundle-2_monitor_30000 node4" [ style = bold] -"app-bundle-2_start_0 node4" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] +"app-bundle-2_start_0 node4" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app-bundle-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app-bundle-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] "app-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"app-bundle-clone_promote_0" -> "app:2_promote_0 app-bundle-2" [ style = bold] +"app-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] 
+"app-bundle-clone_promoted_0" -> "app-bundle_promoted_0" [ style = bold] +"app-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle-clone_running_0" -> "app-bundle-clone_promote_0" [ style = bold] "app-bundle-clone_running_0" -> "app-bundle_running_0" [ style = bold] "app-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] "app-bundle-clone_start_0" -> "app-bundle-clone_running_0" [ style = bold] @@ -133,8 +139,13 @@ "app-bundle-podman-2_start_0 node4" -> "app-bundle-2_start_0 node4" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app-bundle-podman-2_monitor_60000 node4" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app-bundle_running_0" [ style = bold] +"app-bundle-podman-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app-bundle-podman-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold] "app-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"app-bundle_promote_0" -> "app-bundle-clone_promote_0" [ style = bold] +"app-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"app-bundle_running_0" -> "app-bundle_promote_0" [ style = bold] "app-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "app-bundle_start_0" -> "app-bundle-clone_start_0" [ style = bold] "app-bundle_start_0" -> "app-bundle-podman-0_start_0 node2" [ style = bold] @@ -145,15 +156,20 @@ "app:0_start_0 app-bundle-0" -> "app-bundle-clone_running_0" [ style = bold] "app:0_start_0 app-bundle-0" -> "app:0_monitor_16000 app-bundle-0" [ style = bold] "app:0_start_0 app-bundle-0" -> "app:1_start_0 app-bundle-1" [ style = bold] +"app:0_start_0 app-bundle-0" -> "app:2_start_0 app-bundle-2" [ style = bold] "app:0_start_0 app-bundle-0" [ style=bold color="green" fontcolor="black"] "app:1_monitor_16000 app-bundle-1" [ style=bold color="green" fontcolor="black"] "app:1_start_0 app-bundle-1" -> "app-bundle-clone_running_0" [ style = bold] "app:1_start_0 app-bundle-1" -> "app:1_monitor_16000 app-bundle-1" [ style = bold] "app:1_start_0 app-bundle-1" -> "app:2_start_0 app-bundle-2" [ style = bold] "app:1_start_0 app-bundle-1" [ style=bold color="green" fontcolor="black"] -"app:2_monitor_16000 app-bundle-2" [ style=bold color="green" fontcolor="black"] +"app:2_monitor_15000 app-bundle-2" [ style=bold color="green" fontcolor="black"] +"app:2_promote_0 app-bundle-2" -> "app-bundle-clone_promoted_0" [ style = bold] +"app:2_promote_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app:2_promote_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] "app:2_start_0 app-bundle-2" -> "app-bundle-clone_running_0" [ style = bold] -"app:2_start_0 app-bundle-2" -> "app:2_monitor_16000 app-bundle-2" [ style = bold] +"app:2_start_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold] +"app:2_start_0 app-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold] "app:2_start_0 app-bundle-2" [ style=bold color="green" fontcolor="black"] "base-bundle-0_monitor_0 node1" -> "base-bundle-0_start_0 node2" [ style = bold] "base-bundle-0_monitor_0 node1" [ style=bold color="green" fontcolor="black"] @@ -197,9 +213,15 @@ "base-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"] "base-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"] "base-bundle-2_start_0 node4" -> "base-bundle-2_monitor_30000 node4" [ style = bold] -"base-bundle-2_start_0 node4" -> 
"base:2_monitor_16000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node4" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] "base-bundle-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] "base-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] "base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] "base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] "base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] @@ -289,9 +311,15 @@ "base-bundle-podman-2_start_0 node4" -> "base-bundle-2_start_0 node4" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base-bundle-podman-2_monitor_60000 node4" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold] "base-bundle-podman-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold] "base-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" -> "app-bundle_promote_0" [ style = bold] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] "base-bundle_running_0" -> "app-bundle_start_0" [ style = bold] +"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] "base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] "base-bundle_start_0" -> "base-bundle-podman-0_start_0 node2" [ style = bold] @@ -303,6 +331,7 @@ "base:0_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold] "base:0_start_0 base-bundle-0" -> "base:0_monitor_16000 base-bundle-0" [ style = bold] "base:0_start_0 base-bundle-0" -> "base:1_start_0 base-bundle-1" [ style = bold] +"base:0_start_0 base-bundle-0" -> "base:2_start_0 base-bundle-2" [ style = bold] "base:0_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] "base:1_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] "base:1_start_0 base-bundle-1" -> "app-bundle-podman-1_start_0 node3" [ style = bold] @@ -310,9 +339,14 @@ "base:1_start_0 base-bundle-1" -> "base:1_monitor_16000 base-bundle-1" [ style = bold] "base:1_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold] "base:1_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] -"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_promote_0 base-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold] +"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold] +"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] 
"base:2_start_0 base-bundle-2" -> "app-bundle-podman-2_start_0 node4" [ style = bold] "base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] -"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold] "base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/bundle-nested-colocation.dot b/cts/scheduler/dot/bundle-nested-colocation.dot index 87b6c0d..c11447a 100644 --- a/cts/scheduler/dot/bundle-nested-colocation.dot +++ b/cts/scheduler/dot/bundle-nested-colocation.dot @@ -139,6 +139,7 @@ "rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = bold] "rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:0_monitor_10000 rabbitmq-bundle-0" [ style = bold] "rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:1_start_0 rabbitmq-bundle-1" [ style = bold] +"rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:2_start_0 rabbitmq-bundle-2" [ style = bold] "rabbitmq:0_start_0 rabbitmq-bundle-0" [ style=bold color="green" fontcolor="black"] "rabbitmq:1_monitor_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_start_0" [ style = bold] "rabbitmq:1_monitor_0 rabbitmq-bundle-1" [ style=bold color="green" fontcolor="black"] diff --git a/cts/scheduler/dot/bundle-order-startup-clone-2.dot b/cts/scheduler/dot/bundle-order-startup-clone-2.dot index b04e9f7..93877f4 100644 --- a/cts/scheduler/dot/bundle-order-startup-clone-2.dot +++ b/cts/scheduler/dot/bundle-order-startup-clone-2.dot @@ -114,6 +114,7 @@ "galera:0_start_0 galera-bundle-0" -> "galera:0_monitor_20000 galera-bundle-0" [ style = bold] "galera:0_start_0 galera-bundle-0" -> "galera:0_monitor_30000 galera-bundle-0" [ style = bold] "galera:0_start_0 galera-bundle-0" -> "galera:1_start_0 galera-bundle-1" [ style = bold] +"galera:0_start_0 galera-bundle-0" -> "galera:2_start_0 galera-bundle-2" [ style = bold] "galera:0_start_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] "galera:1_monitor_20000 galera-bundle-1" [ style=bold color="green" fontcolor="black"] "galera:1_monitor_30000 galera-bundle-1" [ style=bold color="green" fontcolor="black"] @@ -343,6 +344,7 @@ "redis:0_start_0 redis-bundle-0" -> "redis:0_monitor_20000 redis-bundle-0" [ style = bold] "redis:0_start_0 redis-bundle-0" -> "redis:0_promote_0 redis-bundle-0" [ style = bold] "redis:0_start_0 redis-bundle-0" -> "redis:1_start_0 redis-bundle-1" [ style = bold] +"redis:0_start_0 redis-bundle-0" -> "redis:2_start_0 redis-bundle-2" [ style = bold] "redis:0_start_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"] "redis:1_monitor_20000 redis-bundle-1" [ style=bold color="green" fontcolor="black"] "redis:1_post_notify_promote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] diff --git a/cts/scheduler/dot/bundle-probe-remotes.dot b/cts/scheduler/dot/bundle-probe-remotes.dot index 958cc90..260b0be 100644 --- a/cts/scheduler/dot/bundle-probe-remotes.dot +++ b/cts/scheduler/dot/bundle-probe-remotes.dot @@ -53,21 +53,31 @@ "dummy1:0_monitor_10000 scale1-bundle-0" [ style=bold color="green" fontcolor="black"] "dummy1:0_start_0 scale1-bundle-0" -> "dummy1:0_monitor_10000 scale1-bundle-0" [ style = bold] "dummy1:0_start_0 scale1-bundle-0" -> "dummy1:1_start_0 scale1-bundle-1" [ style = bold] +"dummy1:0_start_0 scale1-bundle-0" -> 
"dummy1:2_start_0 scale1-bundle-2" [ style = bold] +"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold] +"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold] +"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold] "dummy1:0_start_0 scale1-bundle-0" -> "scale1-bundle-clone_running_0" [ style = bold] "dummy1:0_start_0 scale1-bundle-0" [ style=bold color="green" fontcolor="black"] "dummy1:1_monitor_10000 scale1-bundle-1" [ style=bold color="green" fontcolor="black"] "dummy1:1_start_0 scale1-bundle-1" -> "dummy1:1_monitor_10000 scale1-bundle-1" [ style = bold] "dummy1:1_start_0 scale1-bundle-1" -> "dummy1:2_start_0 scale1-bundle-2" [ style = bold] +"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold] +"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold] +"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold] "dummy1:1_start_0 scale1-bundle-1" -> "scale1-bundle-clone_running_0" [ style = bold] "dummy1:1_start_0 scale1-bundle-1" [ style=bold color="green" fontcolor="black"] "dummy1:2_monitor_10000 scale1-bundle-2" [ style=bold color="green" fontcolor="black"] "dummy1:2_start_0 scale1-bundle-2" -> "dummy1:2_monitor_10000 scale1-bundle-2" [ style = bold] "dummy1:2_start_0 scale1-bundle-2" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold] +"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold] +"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold] "dummy1:2_start_0 scale1-bundle-2" -> "scale1-bundle-clone_running_0" [ style = bold] "dummy1:2_start_0 scale1-bundle-2" [ style=bold color="green" fontcolor="black"] "dummy1:3_monitor_10000 scale1-bundle-3" [ style=bold color="green" fontcolor="black"] "dummy1:3_start_0 scale1-bundle-3" -> "dummy1:3_monitor_10000 scale1-bundle-3" [ style = bold] "dummy1:3_start_0 scale1-bundle-3" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold] +"dummy1:3_start_0 scale1-bundle-3" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold] "dummy1:3_start_0 scale1-bundle-3" -> "scale1-bundle-clone_running_0" [ style = bold] "dummy1:3_start_0 scale1-bundle-3" [ style=bold color="green" fontcolor="black"] "dummy1:4_monitor_10000 scale1-bundle-4" [ style=bold color="green" fontcolor="black"] diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot new file mode 100644 index 0000000..ce2cf6a --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot @@ -0,0 +1,7 @@ + digraph "g" { +"vip_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"vip_start_0 node1" -> "vip_monitor_10000 node1" [ style = bold] +"vip_start_0 node1" [ style=bold color="green" fontcolor="black"] +"vip_stop_0 node3" -> "vip_start_0 node1" [ style = bold] +"vip_stop_0 node3" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot new file mode 100644 index 0000000..ce2cf6a --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot @@ -0,0 +1,7 @@ + digraph "g" { +"vip_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"vip_start_0 node1" -> "vip_monitor_10000 node1" [ style = bold] +"vip_start_0 node1" [ style=bold color="green" fontcolor="black"] +"vip_stop_0 node3" -> 
"vip_start_0 node1" [ style = bold] +"vip_stop_0 node3" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot new file mode 100644 index 0000000..a71ec35 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold] +"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold] +"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold] +"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold] +"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold] +"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot new file mode 100644 index 0000000..a71ec35 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold] +"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold] +"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold] 
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold] +"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold] +"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold] +"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot new file mode 100644 index 0000000..b3db02e --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold] +"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_15000 
bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold] +"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold] +"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"] +} diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot new file mode 100644 index 0000000..b3db02e --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold] +"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold] +"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold] +"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promoted_0" [ 
style=bold color="green" fontcolor="orange"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-1.dot b/cts/scheduler/dot/bundle-promoted-colocation-1.dot new file mode 100644 index 0000000..6b857e5 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-1.dot @@ -0,0 +1,7 @@ + digraph "g" { +"vip_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"vip_start_0 node3" -> "vip_monitor_10000 node3" [ style = bold] +"vip_start_0 node3" [ style=bold color="green" fontcolor="black"] +"vip_stop_0 node1" -> "vip_start_0 node3" [ style = bold] +"vip_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-2.dot b/cts/scheduler/dot/bundle-promoted-colocation-2.dot new file mode 100644 index 0000000..6b857e5 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-2.dot @@ -0,0 +1,7 @@ + digraph "g" { +"vip_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"vip_start_0 node3" -> "vip_monitor_10000 node3" [ style = bold] +"vip_start_0 node3" [ style=bold color="green" fontcolor="black"] +"vip_stop_0 node1" -> "vip_start_0 node3" [ style = bold] +"vip_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-3.dot b/cts/scheduler/dot/bundle-promoted-colocation-3.dot new file mode 100644 index 0000000..69f6cf5 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-3.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold] +"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"Cancel base_monitor_16000 base-bundle-0" -> "base_promote_0 base-bundle-0" [ style = bold] +"Cancel base_monitor_16000 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-0" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold] +"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"] 
+"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_promote_0 base-bundle-0" -> "base-bundle-clone_promoted_0" [ style = bold] +"base_promote_0 base-bundle-0" -> "base_monitor_15000 base-bundle-0" [ style = bold] +"base_promote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-4.dot b/cts/scheduler/dot/bundle-promoted-colocation-4.dot new file mode 100644 index 0000000..69f6cf5 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-4.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold] +"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"Cancel base_monitor_16000 base-bundle-0" -> "base_promote_0 base-bundle-0" [ style = bold] +"Cancel base_monitor_16000 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-0" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold] +"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_promote_0 base-bundle-0" -> "base-bundle-clone_promoted_0" [ style = bold] +"base_promote_0 base-bundle-0" -> "base_monitor_15000 base-bundle-0" [ style = bold] +"base_promote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-5.dot b/cts/scheduler/dot/bundle-promoted-colocation-5.dot new file mode 100644 index 0000000..b3db02e --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-5.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"Cancel 
bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold] +"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold] +"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold] +"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"] +} diff --git a/cts/scheduler/dot/bundle-promoted-colocation-6.dot b/cts/scheduler/dot/bundle-promoted-colocation-6.dot new file mode 100644 index 0000000..b3db02e --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-colocation-6.dot @@ -0,0 +1,32 @@ + digraph "g" { +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold] +"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold] +"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold] +"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold] +"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold] 
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold] +"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold] +"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"] +"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold] +"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold] +"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold] +"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold] +"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"] +"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"] +} diff --git a/cts/scheduler/dot/bundle-promoted-location-1.dot b/cts/scheduler/dot/bundle-promoted-location-1.dot new file mode 100644 index 0000000..d8f1c9f --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-1.dot @@ -0,0 +1,2 @@ + digraph "g" { +} diff --git a/cts/scheduler/dot/bundle-promoted-location-2.dot b/cts/scheduler/dot/bundle-promoted-location-2.dot new file mode 100644 index 0000000..72d1487 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-2.dot @@ -0,0 +1,75 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-0" -> "base_demote_0 base-bundle-0" [ style = bold] +"Cancel base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold] +"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base-bundle-0_monitor_30000 node3" [ style=dashed color="red" fontcolor="black"] +"base-bundle-0_start_0 node3" -> "base-bundle-0_monitor_30000 node3" [ style = dashed] +"base-bundle-0_start_0 node3" -> "base_monitor_16000 base-bundle-0" [ style = dashed] +"base-bundle-0_start_0 node3" -> "base_start_0 base-bundle-0" [ style = dashed] +"base-bundle-0_start_0 node3" [ style=dashed color="red" fontcolor="black"] +"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node3" [ style = dashed] +"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] +"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-0" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_stop_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] 
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = dashed] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] +"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] +"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stopped_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" -> "base-bundle_stop_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] +"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold] +"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold] +"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stopped_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base_demote_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold] +"base_demote_0 base-bundle-0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-0" -> "base_monitor_16000 base-bundle-0" [ style = dashed] +"base_demote_0 base-bundle-0" -> "base_stop_0 base-bundle-0" [ style = bold] +"base_demote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-0" [ style=dashed color="red" fontcolor="black"] +"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold] +"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold] +"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = dashed] +"base_start_0 base-bundle-0" -> "base_monitor_16000 base-bundle-0" [ style = dashed] +"base_start_0 base-bundle-0" [ style=dashed color="red" fontcolor="black"] +"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 
node3" [ style = bold] +"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold] +"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = dashed] +"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-promoted-location-3.dot b/cts/scheduler/dot/bundle-promoted-location-3.dot new file mode 100644 index 0000000..d8f1c9f --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-3.dot @@ -0,0 +1,2 @@ + digraph "g" { +} diff --git a/cts/scheduler/dot/bundle-promoted-location-4.dot b/cts/scheduler/dot/bundle-promoted-location-4.dot new file mode 100644 index 0000000..d8f1c9f --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-4.dot @@ -0,0 +1,2 @@ + digraph "g" { +} diff --git a/cts/scheduler/dot/bundle-promoted-location-5.dot b/cts/scheduler/dot/bundle-promoted-location-5.dot new file mode 100644 index 0000000..d8f1c9f --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-5.dot @@ -0,0 +1,2 @@ + digraph "g" { +} diff --git a/cts/scheduler/dot/bundle-promoted-location-6.dot b/cts/scheduler/dot/bundle-promoted-location-6.dot new file mode 100644 index 0000000..9627375 --- /dev/null +++ b/cts/scheduler/dot/bundle-promoted-location-6.dot @@ -0,0 +1,37 @@ + digraph "g" { +"base-bundle-1_monitor_30000 node2" [ style=dashed color="red" fontcolor="black"] +"base-bundle-1_start_0 node2" -> "base-bundle-1_monitor_30000 node2" [ style = dashed] +"base-bundle-1_start_0 node2" -> "base_monitor_16000 base-bundle-1" [ style = dashed] +"base-bundle-1_start_0 node2" -> "base_start_0 base-bundle-1" [ style = dashed] +"base-bundle-1_start_0 node2" [ style=dashed color="red" fontcolor="black"] +"base-bundle-1_stop_0 node2" -> "base-bundle-1_start_0 node2" [ style = dashed] +"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] +"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = dashed] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold] +"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] +"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-1_stop_0 node2" -> "base-bundle_stopped_0" [ style = bold] +"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold] +"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold] +"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold] +"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] +"base_monitor_16000 base-bundle-1" [ style=dashed color="red" fontcolor="black"] +"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = dashed] 
+"base_start_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = dashed] +"base_start_0 base-bundle-1" [ style=dashed color="red" fontcolor="black"] +"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold] +"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold] +"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = dashed] +"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/bundle-replicas-change.dot b/cts/scheduler/dot/bundle-replicas-change.dot index 08c20b4..4d98275 100644 --- a/cts/scheduler/dot/bundle-replicas-change.dot +++ b/cts/scheduler/dot/bundle-replicas-change.dot @@ -89,6 +89,7 @@ "httpd:0_start_0 httpd-bundle-0" -> "httpd-bundle-clone_running_0" [ style = bold] "httpd:0_start_0 httpd-bundle-0" -> "httpd:0_monitor_10000 httpd-bundle-0" [ style = bold] "httpd:0_start_0 httpd-bundle-0" -> "httpd:1_start_0 httpd-bundle-1" [ style = bold] +"httpd:0_start_0 httpd-bundle-0" -> "httpd:2_start_0 httpd-bundle-2" [ style = bold] "httpd:0_start_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"] "httpd:1_monitor_10000 httpd-bundle-1" [ style=bold color="green" fontcolor="black"] "httpd:1_start_0 httpd-bundle-1" -> "httpd-bundle-clone_running_0" [ style = bold] diff --git a/cts/scheduler/dot/cancel-behind-moving-remote.dot b/cts/scheduler/dot/cancel-behind-moving-remote.dot index 1a0dfc8..0eddcce 100644 --- a/cts/scheduler/dot/cancel-behind-moving-remote.dot +++ b/cts/scheduler/dot/cancel-behind-moving-remote.dot @@ -1,50 +1,30 @@ digraph "g" { -"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ip-172.17.1.87_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"] -"ip-172.17.1.87_start_0 controller-0" -> "ip-172.17.1.87_monitor_10000 controller-0" [ style = bold] -"ip-172.17.1.87_start_0 controller-0" [ style=bold color="green" fontcolor="black"] +"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-2" -> "ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style = bold] +"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] +"ip-172.17.1.87_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"] +"ip-172.17.1.87_start_0 controller-1" -> "ip-172.17.1.87_monitor_10000 controller-1" [ style = bold] +"ip-172.17.1.87_start_0 controller-1" [ style=bold color="green" fontcolor="black"] "nova-evacuate_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_clear_failcount_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle-0_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_monitor_30000 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovn-dbs-bundle-0_monitor_30000 controller-2" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_clear_failcount_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-1_clear_failcount_0 
controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovn-dbs-bundle-1_monitor_30000 controller-0" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -"ovn-dbs-bundle-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-0_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovn-dbs-bundle-0_monitor_30000 controller-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] +"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle_running_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] +"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> 
"ovn-dbs-bundle-master_post_notify_running_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold] @@ -55,22 +35,16 @@ "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold] -"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] +"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold] -"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] +"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style = bold] "ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold] "ovn-dbs-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] @@ -79,48 +53,22 @@ 
"ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] "ovn-dbs-bundle-master_start_0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] "ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold] -"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold] -"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle_running_0" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] -"ovn-dbs-bundle-podman-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle-podman-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle_stopped_0" [ style = bold] -"ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"] -"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold] +"ovn-dbs-bundle-podman-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"] +"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-1" [ style = bold] "ovn-dbs-bundle_promote_0" -> 
"ovn-dbs-bundle-master_promote_0" [ style = bold] "ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] "ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold] -"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-2" [ style = bold] -"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold] +"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-0" [ style = bold] "ovn-dbs-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold] -"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold] -"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold] -"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold] -"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_start_0" [ style = bold] -"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] @@ -130,9 +78,8 @@ "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = bold] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold] -"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] +"ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold] @@ -141,29 +88,17 @@ "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold] "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold] -"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] 
"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold] "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] +"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] +"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold] "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold] -"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold] -"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_running_0" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-1_stop_0 controller-2" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold] -"ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"] +"ovndb_servers_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold] +"ovndb_servers_promote_0 ovn-dbs-bundle-2" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold] +"ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"] "rabbitmq-bundle-1_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"] "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq-bundle-1_monitor_30000 controller-0" [ style = dashed] "rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed] diff --git a/cts/scheduler/dot/clone-order-16instances.dot b/cts/scheduler/dot/clone-order-16instances.dot index cf87468..fbbb55b 100644 --- a/cts/scheduler/dot/clone-order-16instances.dot +++ b/cts/scheduler/dot/clone-order-16instances.dot @@ -3,51 +3,36 @@ "clvmd-clone_start_0" -> "clvmd-clone_running_0" [ style = dashed] "clvmd-clone_start_0" [ style=dashed color="red" fontcolor="orange"] "clvmd:10_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:10_start_0 " -> "clvmd:11_start_0 " [ style = dashed] "clvmd:10_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:11_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:11_start_0 " -> "clvmd:12_start_0 " [ style = dashed] "clvmd:11_start_0 " [ style=dashed 
color="red" fontcolor="black"] "clvmd:12_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:12_start_0 " -> "clvmd:13_start_0 " [ style = dashed] "clvmd:12_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:13_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:13_start_0 " -> "clvmd:14_start_0 " [ style = dashed] "clvmd:13_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:14_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:14_start_0 " -> "clvmd:15_start_0 " [ style = dashed] "clvmd:14_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:15_start_0 " -> "clvmd-clone_running_0" [ style = dashed] "clvmd:15_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:1_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:1_start_0 " -> "clvmd:2_start_0 " [ style = dashed] "clvmd:1_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:2_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:2_start_0 " -> "clvmd:3_start_0 " [ style = dashed] "clvmd:2_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:3_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:3_start_0 " -> "clvmd:4_start_0 " [ style = dashed] "clvmd:3_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:4_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:4_start_0 " -> "clvmd:5_start_0 " [ style = dashed] "clvmd:4_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:5_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:5_start_0 " -> "clvmd:6_start_0 " [ style = dashed] "clvmd:5_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:6_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:6_start_0 " -> "clvmd:7_start_0 " [ style = dashed] "clvmd:6_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:7_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:7_start_0 " -> "clvmd:8_start_0 " [ style = dashed] "clvmd:7_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:8_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:8_start_0 " -> "clvmd:9_start_0 " [ style = dashed] "clvmd:8_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd:9_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd:9_start_0 " -> "clvmd:10_start_0 " [ style = dashed] "clvmd:9_start_0 " [ style=dashed color="red" fontcolor="black"] "clvmd_start_0 " -> "clvmd-clone_running_0" [ style = dashed] -"clvmd_start_0 " -> "clvmd:1_start_0 " [ style = dashed] "clvmd_start_0 " [ style=dashed color="red" fontcolor="black"] "dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = dashed] "dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"] @@ -71,21 +56,31 @@ "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 
virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:14_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] @@ -98,37 +93,93 @@ "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:3_start_0 
virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" 
-> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:5_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> 
"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] "dlm:9_start_0 
virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] "dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] @@ -149,7 +200,19 @@ "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:9_start_0 " [ style = dashed] "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd_start_0 " [ style = dashed] "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] } diff --git 
a/cts/scheduler/dot/clone-recover-no-shuffle-1.dot b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot new file mode 100644 index 0000000..287d82d --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot @@ -0,0 +1,10 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-10.dot b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot new file mode 100644 index 0000000..1e18409 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot @@ -0,0 +1,10 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_11000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-11.dot b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot new file mode 100644 index 0000000..2b08a59 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot @@ -0,0 +1,21 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_11000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_11000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-12.dot b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot new file mode 100644 index 0000000..ebc1dc6 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot @@ -0,0 +1,35 @@ + digraph "g" { +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" 
fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-2.dot b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot new file mode 100644 index 0000000..d3bdf04 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot @@ -0,0 +1,21 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> 
"grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-3.dot b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot new file mode 100644 index 0000000..f60fd2c --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot @@ -0,0 +1,32 @@ + digraph "g" { +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot new file mode 100644 index 0000000..287d82d --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot @@ -0,0 +1,10 @@ + digraph "g" { +"dummy-clone_running_0" [ style=bold color="green" 
fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot new file mode 100644 index 0000000..d3bdf04 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot @@ -0,0 +1,21 @@ + digraph "g" { +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot new file mode 100644 index 0000000..f60fd2c --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot @@ -0,0 +1,32 @@ + digraph "g" { +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" 
fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot new file mode 100644 index 0000000..f61bf0d --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot @@ -0,0 +1,30 @@ + digraph "g" { +"Cancel dummy_monitor_10000 node2" -> "dummy_demote_0 node2" [ style = bold] +"Cancel dummy_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"dummy-clone_demote_0" -> "dummy-clone_demoted_0" [ style = bold] +"dummy-clone_demote_0" -> "dummy_demote_0 node2" [ style = bold] +"dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold] +"dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold] +"dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_promote_0" -> "dummy:2_promote_0 node1" [ style = bold] +"dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold] +"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold] +"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold] +"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold] +"dummy:2_promote_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] +"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold] +"dummy:2_start_0 node1" -> "dummy:2_promote_0 node1" [ style = bold] +"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold] +"dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold] +"dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-8.dot b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot new file 
mode 100644 index 0000000..d9c311a --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot @@ -0,0 +1,63 @@ + digraph "g" { +"Cancel rsc1_monitor_10000 node2" -> "rsc1_demote_0 node2" [ style = bold] +"Cancel rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"Cancel rsc2_monitor_10000 node2" -> "rsc2_demote_0 node2" [ style = bold] +"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"grp-clone_demote_0" -> "grp-clone_demoted_0" [ style = bold] +"grp-clone_demote_0" -> "grp:1_demote_0" [ style = bold] +"grp-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_demoted_0" -> "grp-clone_promote_0" [ style = bold] +"grp-clone_demoted_0" -> "grp-clone_start_0" [ style = bold] +"grp-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_promote_0" -> "grp:2_promote_0" [ style = bold] +"grp-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_running_0" -> "grp-clone_promote_0" [ style = bold] +"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold] +"grp-clone_start_0" -> "grp:2_start_0" [ style = bold] +"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_demote_0" -> "rsc1_demote_0 node2" [ style = bold] +"grp:1_demote_0" -> "rsc2_demote_0 node2" [ style = bold] +"grp:1_demote_0" [ style=bold color="green" fontcolor="orange"] +"grp:1_demoted_0" -> "grp-clone_demoted_0" [ style = bold] +"grp:1_demoted_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_promote_0" -> "rsc1:2_promote_0 node1" [ style = bold] +"grp:2_promote_0" -> "rsc2:2_promote_0 node1" [ style = bold] +"grp:2_promote_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_promoted_0" -> "grp-clone_promoted_0" [ style = bold] +"grp:2_promoted_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_running_0" -> "grp-clone_running_0" [ style = bold] +"grp:2_running_0" [ style=bold color="green" fontcolor="orange"] +"grp:2_start_0" -> "grp:2_running_0" [ style = bold] +"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold] +"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold] +"grp:2_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] +"rsc1:2_promote_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_promote_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] +"rsc1:2_promote_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc1:2_promote_0 node1" [ style = bold] +"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold] +"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] +"rsc1_demote_0 node2" -> "rsc1_monitor_11000 node2" [ style = bold] +"rsc1_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc2:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold] +"rsc2:2_promote_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_promote_0 node1" 
[ style=bold color="green" fontcolor="black"] +"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold] +"rsc2:2_start_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold] +"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc2_demote_0 node2" -> "grp:1_demoted_0" [ style = bold] +"rsc2_demote_0 node2" -> "rsc1_demote_0 node2" [ style = bold] +"rsc2_demote_0 node2" -> "rsc2_monitor_11000 node2" [ style = bold] +"rsc2_demote_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc2_monitor_11000 node2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-9.dot b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot new file mode 100644 index 0000000..45dbac4 --- /dev/null +++ b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot @@ -0,0 +1,69 @@ + digraph "g" { +"Cancel base_monitor_15000 base-bundle-1" -> "base_demote_0 base-bundle-1" [ style = bold] +"Cancel base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold] +"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-1" [ style = bold] +"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold] +"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold] +"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold] +"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle-podman-2_start_0 node1" -> 
"base-bundle-2_monitor_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold] +"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"] +"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold] +"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold] +"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_demoted_0" -> "base-bundle_start_0" [ style = bold] +"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold] +"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold] +"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"] +"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold] +"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold] +"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"] +"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold] +"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold] +"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"] +"base_demote_0 base-bundle-1" -> "base-bundle-clone_demoted_0" [ style = bold] +"base_demote_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = bold] +"base_demote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"] +"base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/coloc-with-inner-group-member.dot b/cts/scheduler/dot/coloc-with-inner-group-member.dot new file mode 100644 index 0000000..a3bad7a --- /dev/null +++ b/cts/scheduler/dot/coloc-with-inner-group-member.dot @@ -0,0 +1,40 @@ + digraph "g" { +"bar_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] +"bar_start_0 rhel8-3" -> "bar_monitor_10000 rhel8-3" [ style = bold] +"bar_start_0 rhel8-3" -> "grp_running_0" [ style = bold] +"bar_start_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold] +"bar_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] +"bar_stop_0 rhel8-4" -> "bar_start_0 rhel8-3" [ style = bold] +"bar_stop_0 rhel8-4" -> "foo_stop_0 rhel8-4" [ style = bold] +"bar_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold] +"bar_stop_0 rhel8-4" 
[ style=bold color="green" fontcolor="black"] +"foo_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] +"foo_start_0 rhel8-3" -> "bar_start_0 rhel8-3" [ style = bold] +"foo_start_0 rhel8-3" -> "foo_monitor_10000 rhel8-3" [ style = bold] +"foo_start_0 rhel8-3" -> "grp_running_0" [ style = bold] +"foo_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] +"foo_stop_0 rhel8-4" -> "foo_start_0 rhel8-3" [ style = bold] +"foo_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold] +"foo_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"] +"grp_running_0" [ style=bold color="green" fontcolor="orange"] +"grp_start_0" -> "bar_start_0 rhel8-3" [ style = bold] +"grp_start_0" -> "foo_start_0 rhel8-3" [ style = bold] +"grp_start_0" -> "grp_running_0" [ style = bold] +"grp_start_0" -> "vip_start_0 rhel8-3" [ style = bold] +"grp_start_0" [ style=bold color="green" fontcolor="orange"] +"grp_stop_0" -> "bar_stop_0 rhel8-4" [ style = bold] +"grp_stop_0" -> "foo_stop_0 rhel8-4" [ style = bold] +"grp_stop_0" -> "grp_stopped_0" [ style = bold] +"grp_stop_0" -> "vip_stop_0 rhel8-3" [ style = bold] +"grp_stop_0" [ style=bold color="green" fontcolor="orange"] +"grp_stopped_0" -> "grp_start_0" [ style = bold] +"grp_stopped_0" [ style=bold color="green" fontcolor="orange"] +"vip_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"] +"vip_start_0 rhel8-3" -> "grp_running_0" [ style = bold] +"vip_start_0 rhel8-3" -> "vip_monitor_10000 rhel8-3" [ style = bold] +"vip_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"] +"vip_stop_0 rhel8-3" -> "bar_stop_0 rhel8-4" [ style = bold] +"vip_stop_0 rhel8-3" -> "grp_stopped_0" [ style = bold] +"vip_stop_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold] +"vip_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/group-anticolocation-2.dot b/cts/scheduler/dot/group-anticolocation-2.dot new file mode 100644 index 0000000..def3b8b --- /dev/null +++ b/cts/scheduler/dot/group-anticolocation-2.dot @@ -0,0 +1,29 @@ + digraph "g" { +"group2_running_0" [ style=bold color="green" fontcolor="orange"] +"group2_start_0" -> "group2_running_0" [ style = bold] +"group2_start_0" -> "member2a_start_0 node2" [ style = bold] +"group2_start_0" -> "member2b_start_0 node2" [ style = bold] +"group2_start_0" [ style=bold color="green" fontcolor="orange"] +"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold] +"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange"] +"group2_stopped_0" -> "group2_start_0" [ style = bold] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange"] +"member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"member2a_start_0 node2" -> "group2_running_0" [ style = bold] +"member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold] +"member2a_start_0 node2" -> "member2b_start_0 node2" [ style = bold] +"member2a_start_0 node2" [ style=bold color="green" fontcolor="black"] +"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2a_stop_0 node1" -> "member2a_start_0 node2" [ style = bold] +"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"] +"member2b_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"member2b_start_0 node2" -> "group2_running_0" [ style = bold] +"member2b_start_0 node2" -> "member2b_monitor_10000 node2" [ style = bold] +"member2b_start_0 node2" [ 
style=bold color="green" fontcolor="black"] +"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold] +"member2b_stop_0 node1" -> "member2b_start_0 node2" [ style = bold] +"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/group-anticolocation-3.dot b/cts/scheduler/dot/group-anticolocation-3.dot new file mode 100644 index 0000000..4886650 --- /dev/null +++ b/cts/scheduler/dot/group-anticolocation-3.dot @@ -0,0 +1,8 @@ + digraph "g" { +"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange"] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange"] +"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/group-anticolocation-4.dot b/cts/scheduler/dot/group-anticolocation-4.dot new file mode 100644 index 0000000..def3b8b --- /dev/null +++ b/cts/scheduler/dot/group-anticolocation-4.dot @@ -0,0 +1,29 @@ + digraph "g" { +"group2_running_0" [ style=bold color="green" fontcolor="orange"] +"group2_start_0" -> "group2_running_0" [ style = bold] +"group2_start_0" -> "member2a_start_0 node2" [ style = bold] +"group2_start_0" -> "member2b_start_0 node2" [ style = bold] +"group2_start_0" [ style=bold color="green" fontcolor="orange"] +"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold] +"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange"] +"group2_stopped_0" -> "group2_start_0" [ style = bold] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange"] +"member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"member2a_start_0 node2" -> "group2_running_0" [ style = bold] +"member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold] +"member2a_start_0 node2" -> "member2b_start_0 node2" [ style = bold] +"member2a_start_0 node2" [ style=bold color="green" fontcolor="black"] +"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2a_stop_0 node1" -> "member2a_start_0 node2" [ style = bold] +"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"] +"member2b_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"member2b_start_0 node2" -> "group2_running_0" [ style = bold] +"member2b_start_0 node2" -> "member2b_monitor_10000 node2" [ style = bold] +"member2b_start_0 node2" [ style=bold color="green" fontcolor="black"] +"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold] +"member2b_stop_0 node1" -> "member2b_start_0 node2" [ style = bold] +"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/group-anticolocation-5.dot b/cts/scheduler/dot/group-anticolocation-5.dot new file mode 100644 index 0000000..c30fd94 --- /dev/null +++ b/cts/scheduler/dot/group-anticolocation-5.dot @@ -0,0 +1,29 @@ + digraph "g" { +"group2_running_0" [ style=bold color="green" fontcolor="orange"] +"group2_start_0" -> "group2_running_0" [ style = bold] +"group2_start_0" -> "member2a_start_0 node3" [ style = bold] +"group2_start_0" -> "member2b_start_0 node3" [ style = bold] +"group2_start_0" [ style=bold color="green" fontcolor="orange"] 
+"group2_stop_0" -> "group2_stopped_0" [ style = bold] +"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold] +"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold] +"group2_stop_0" [ style=bold color="green" fontcolor="orange"] +"group2_stopped_0" -> "group2_start_0" [ style = bold] +"group2_stopped_0" [ style=bold color="green" fontcolor="orange"] +"member2a_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"member2a_start_0 node3" -> "group2_running_0" [ style = bold] +"member2a_start_0 node3" -> "member2a_monitor_10000 node3" [ style = bold] +"member2a_start_0 node3" -> "member2b_start_0 node3" [ style = bold] +"member2a_start_0 node3" [ style=bold color="green" fontcolor="black"] +"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2a_stop_0 node1" -> "member2a_start_0 node3" [ style = bold] +"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"] +"member2b_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"member2b_start_0 node3" -> "group2_running_0" [ style = bold] +"member2b_start_0 node3" -> "member2b_monitor_10000 node3" [ style = bold] +"member2b_start_0 node3" [ style=bold color="green" fontcolor="black"] +"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold] +"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold] +"member2b_stop_0 node1" -> "member2b_start_0 node3" [ style = bold] +"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/group-anticolocation.dot b/cts/scheduler/dot/group-anticolocation.dot index def3b8b..6454c12 100644 --- a/cts/scheduler/dot/group-anticolocation.dot +++ b/cts/scheduler/dot/group-anticolocation.dot @@ -1,4 +1,15 @@ digraph "g" { +"group1_running_0" [ style=bold color="green" fontcolor="orange"] +"group1_start_0" -> "group1_running_0" [ style = bold] +"group1_start_0" -> "member1a_start_0 node1" [ style = bold] +"group1_start_0" -> "member1b_start_0 node1" [ style = bold] +"group1_start_0" [ style=bold color="green" fontcolor="orange"] +"group1_stop_0" -> "group1_stopped_0" [ style = bold] +"group1_stop_0" -> "member1a_stop_0 node2" [ style = bold] +"group1_stop_0" -> "member1b_stop_0 node2" [ style = bold] +"group1_stop_0" [ style=bold color="green" fontcolor="orange"] +"group1_stopped_0" -> "group1_start_0" [ style = bold] +"group1_stopped_0" [ style=bold color="green" fontcolor="orange"] "group2_running_0" [ style=bold color="green" fontcolor="orange"] "group2_start_0" -> "group2_running_0" [ style = bold] "group2_start_0" -> "member2a_start_0 node2" [ style = bold] @@ -10,6 +21,22 @@ "group2_stop_0" [ style=bold color="green" fontcolor="orange"] "group2_stopped_0" -> "group2_start_0" [ style = bold] "group2_stopped_0" [ style=bold color="green" fontcolor="orange"] +"member1a_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"member1a_start_0 node1" -> "group1_running_0" [ style = bold] +"member1a_start_0 node1" -> "member1a_monitor_10000 node1" [ style = bold] +"member1a_start_0 node1" -> "member1b_start_0 node1" [ style = bold] +"member1a_start_0 node1" [ style=bold color="green" fontcolor="black"] +"member1a_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"member1a_stop_0 node2" -> "member1a_start_0 node1" [ style = bold] +"member1a_stop_0 node2" [ style=bold color="green" fontcolor="black"] +"member1b_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"member1b_start_0 node1" -> "group1_running_0" [ style = bold] +"member1b_start_0 node1" -> 
"member1b_monitor_10000 node1" [ style = bold] +"member1b_start_0 node1" [ style=bold color="green" fontcolor="black"] +"member1b_stop_0 node2" -> "group1_stopped_0" [ style = bold] +"member1b_stop_0 node2" -> "member1a_stop_0 node2" [ style = bold] +"member1b_stop_0 node2" -> "member1b_start_0 node1" [ style = bold] +"member1b_stop_0 node2" [ style=bold color="green" fontcolor="black"] "member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] "member2a_start_0 node2" -> "group2_running_0" [ style = bold] "member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold] diff --git a/cts/scheduler/dot/guest-host-not-fenceable.dot b/cts/scheduler/dot/guest-host-not-fenceable.dot index a086fcb..0b6eeae 100644 --- a/cts/scheduler/dot/guest-host-not-fenceable.dot +++ b/cts/scheduler/dot/guest-host-not-fenceable.dot @@ -111,6 +111,7 @@ "galera_start_0 galera-bundle-0" -> "galera-bundle-master_running_0" [ style = dashed] "galera_start_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = dashed] "galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-1" [ style = dashed] +"galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-2" [ style = dashed] "galera_start_0 galera-bundle-0" [ style=dashed color="red" fontcolor="black"] "galera_start_0 galera-bundle-1" -> "galera-bundle-master_running_0" [ style = dashed] "galera_start_0 galera-bundle-1" -> "galera_monitor_20000 galera-bundle-1" [ style = dashed] @@ -131,6 +132,7 @@ "galera_stop_0 galera-bundle-1" [ style=dashed color="red" fontcolor="black"] "galera_stop_0 galera-bundle-2" -> "galera-bundle-master_stopped_0" [ style = dashed] "galera_stop_0 galera-bundle-2" -> "galera_start_0 galera-bundle-2" [ style = dashed] +"galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-0" [ style = dashed] "galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-1" [ style = dashed] "galera_stop_0 galera-bundle-2" [ style=dashed color="red" fontcolor="black"] "rabbitmq-bundle-0_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"] @@ -233,6 +235,7 @@ "rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = dashed] "rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed] "rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed] +"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed] "rabbitmq_start_0 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"] "rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_running_0" [ style = dashed] "rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-1" [ style = dashed] @@ -251,6 +254,7 @@ "rabbitmq_stop_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"] "rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_stopped_0" [ style = dashed] "rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed] +"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = dashed] "rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_stop_0 rabbitmq-bundle-1" [ style = dashed] "rabbitmq_stop_0 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"] "stonith-fence_ipmilan-node1_stop_0 node2" [ style=dashed color="red" fontcolor="black"] diff --git a/cts/scheduler/dot/inc4.dot b/cts/scheduler/dot/inc4.dot index be3e1b3..620a845 100644 --- a/cts/scheduler/dot/inc4.dot +++ 
b/cts/scheduler/dot/inc4.dot @@ -24,10 +24,12 @@ "child_rsc1:3_stop_0 node1" -> "child_rsc1:3_start_0 node2" [ style = bold] "child_rsc1:3_stop_0 node1" -> "rsc1_stopped_0" [ style = bold] "child_rsc1:3_stop_0 node1" [ style=bold color="green" fontcolor="black"] +"child_rsc1:4_monitor_0 node2" -> "child_rsc1:2_stop_0 node1" [ style = bold] "child_rsc1:4_monitor_0 node2" -> "child_rsc1:3_stop_0 node1" [ style = bold] "child_rsc1:4_monitor_0 node2" -> "rsc1_start_0" [ style = bold] "child_rsc1:4_monitor_0 node2" -> "rsc1_stopped_0" [ style = bold] "child_rsc1:4_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"child_rsc1:4_stop_0 node1" -> "child_rsc1:2_stop_0 node1" [ style = bold] "child_rsc1:4_stop_0 node1" -> "child_rsc1:3_stop_0 node1" [ style = bold] "child_rsc1:4_stop_0 node1" -> "rsc1_stopped_0" [ style = bold] "child_rsc1:4_stop_0 node1" [ style=bold color="green" fontcolor="black"] diff --git a/cts/scheduler/dot/node-pending-timeout.dot b/cts/scheduler/dot/node-pending-timeout.dot new file mode 100644 index 0000000..c808f7e --- /dev/null +++ b/cts/scheduler/dot/node-pending-timeout.dot @@ -0,0 +1,7 @@ + digraph "g" { +"st-sbd_monitor_0 node-1" -> "st-sbd_start_0 node-1" [ style = bold] +"st-sbd_monitor_0 node-1" [ style=bold color="green" fontcolor="black"] +"st-sbd_start_0 node-1" [ style=bold color="green" fontcolor="black"] +"stonith 'reboot' node-2" -> "st-sbd_start_0 node-1" [ style = bold] +"stonith 'reboot' node-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/order-clone.dot b/cts/scheduler/dot/order-clone.dot index 5aee990..e1b2a1a 100644 --- a/cts/scheduler/dot/order-clone.dot +++ b/cts/scheduler/dot/order-clone.dot @@ -9,9 +9,12 @@ "clvm-clone_start_0" [ style=dashed color="red" fontcolor="orange"] "clvm:0_start_0 hex-7" -> "clvm-clone_running_0" [ style = dashed] "clvm:0_start_0 hex-7" -> "clvm:1_start_0 hex-8" [ style = dashed] +"clvm:0_start_0 hex-7" -> "clvm:2_start_0 hex-9" [ style = dashed] +"clvm:0_start_0 hex-7" -> "clvm:3_start_0 hex-0" [ style = dashed] "clvm:0_start_0 hex-7" [ style=dashed color="red" fontcolor="black"] "clvm:1_start_0 hex-8" -> "clvm-clone_running_0" [ style = dashed] "clvm:1_start_0 hex-8" -> "clvm:2_start_0 hex-9" [ style = dashed] +"clvm:1_start_0 hex-8" -> "clvm:3_start_0 hex-0" [ style = dashed] "clvm:1_start_0 hex-8" [ style=dashed color="red" fontcolor="black"] "clvm:2_start_0 hex-9" -> "clvm-clone_running_0" [ style = dashed] "clvm:2_start_0 hex-9" -> "clvm:3_start_0 hex-0" [ style = dashed] diff --git a/cts/scheduler/dot/pending-node-no-uname.dot b/cts/scheduler/dot/pending-node-no-uname.dot new file mode 100644 index 0000000..98783ca --- /dev/null +++ b/cts/scheduler/dot/pending-node-no-uname.dot @@ -0,0 +1,7 @@ + digraph "g" { +"st-sbd_monitor_0 node-1" -> "st-sbd_start_0 node-1" [ style = dashed] +"st-sbd_monitor_0 node-1" [ style=bold color="green" fontcolor="black"] +"st-sbd_monitor_0 node-2" -> "st-sbd_start_0 node-1" [ style = dashed] +"st-sbd_monitor_0 node-2" [ style=dashed color="red" fontcolor="black"] +"st-sbd_start_0 node-1" [ style=dashed color="red" fontcolor="black"] +} diff --git a/cts/scheduler/dot/promoted-ordering.dot b/cts/scheduler/dot/promoted-ordering.dot index a4ada9c..9b5033b 100644 --- a/cts/scheduler/dot/promoted-ordering.dot +++ b/cts/scheduler/dot/promoted-ordering.dot @@ -24,20 +24,20 @@ "extip_2_start_0 webcluster01" [ style=bold color="green" fontcolor="black"] "fs_mysql_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] 
"intip_0_main_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_1_master_monitor_0 webcluster01" -> "intip_1_master_start_0 webcluster01" [ style = bold] -"intip_1_master_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_1_master_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_1_master_start_0 webcluster01" -> "intip_1_master_monitor_30000 webcluster01" [ style = bold] -"intip_1_master_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] -"intip_1_master_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] -"intip_1_master_start_0 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_2_slave_monitor_0 webcluster01" -> "intip_2_slave_start_0 webcluster01" [ style = bold] -"intip_2_slave_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_2_slave_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"] -"intip_2_slave_start_0 webcluster01" -> "intip_2_slave_monitor_30000 webcluster01" [ style = bold] -"intip_2_slave_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] -"intip_2_slave_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] -"intip_2_slave_start_0 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_1_active_monitor_0 webcluster01" -> "intip_1_active_start_0 webcluster01" [ style = bold] +"intip_1_active_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_1_active_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_1_active_start_0 webcluster01" -> "intip_1_active_monitor_30000 webcluster01" [ style = bold] +"intip_1_active_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] +"intip_1_active_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] +"intip_1_active_start_0 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_2_passive_monitor_0 webcluster01" -> "intip_2_passive_start_0 webcluster01" [ style = bold] +"intip_2_passive_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_2_passive_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"] +"intip_2_passive_start_0 webcluster01" -> "intip_2_passive_monitor_30000 webcluster01" [ style = bold] +"intip_2_passive_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] +"intip_2_passive_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] +"intip_2_passive_start_0 webcluster01" [ style=bold color="green" fontcolor="black"] "ms_drbd_mysql_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] "ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_post_notify_running_0" [ style = bold] "ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_start_0" [ style = bold] diff --git a/cts/scheduler/dot/promoted-probed-score.dot b/cts/scheduler/dot/promoted-probed-score.dot index a382979..f93648f 100644 --- a/cts/scheduler/dot/promoted-probed-score.dot +++ b/cts/scheduler/dot/promoted-probed-score.dot @@ -1,11 +1,11 @@ digraph "g" { -"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] 
+"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_confirmed-post_notify_promoted_0" -> "FilesystemClone_start_0" [ style = bold] "AdminClone_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_confirmed-post_notify_running_0" -> "AdminClone_pre_notify_promote_0" [ style = bold] -"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_confirmed-pre_notify_promote_0" -> "AdminClone_post_notify_promoted_0" [ style = bold] "AdminClone_confirmed-pre_notify_promote_0" -> "AdminClone_promote_0" [ style = bold] @@ -14,21 +14,21 @@ "AdminClone_confirmed-pre_notify_start_0" -> "AdminClone_start_0" [ style = bold] "AdminClone_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_post_notify_promoted_0" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold] -"AdminClone_post_notify_promoted_0" -> "AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_post_notify_promoted_0" -> "AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_post_notify_promoted_0" -> "AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_post_notify_promoted_0" -> "AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_post_notify_running_0" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold] -"AdminClone_post_notify_running_0" -> "AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_post_notify_running_0" -> "AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_post_notify_running_0" -> "AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_post_notify_running_0" -> "AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_post_notify_running_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_pre_notify_promote_0" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold] -"AdminClone_pre_notify_promote_0" -> "AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_pre_notify_promote_0" -> "AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_pre_notify_promote_0" -> "AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_pre_notify_promote_0" -> "AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_pre_notify_start_0" -> "AdminClone_confirmed-pre_notify_start_0" [ style = bold] 
"AdminClone_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"] -"AdminClone_promote_0" -> "AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_promote_0" -> "AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_promote_0" -> "AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_promote_0" -> "AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_promote_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_promoted_0" -> "AdminClone_post_notify_promoted_0" [ style = bold] "AdminClone_promoted_0" [ style=bold color="green" fontcolor="orange"] @@ -36,53 +36,53 @@ "AdminClone_running_0" -> "AdminClone_promote_0" [ style = bold] "AdminClone_running_0" [ style=bold color="green" fontcolor="orange"] "AdminClone_start_0" -> "AdminClone_running_0" [ style = bold] -"AdminClone_start_0" -> "AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminClone_start_0" -> "AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_start_0" -> "AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminClone_start_0" -> "AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "AdminClone_start_0" [ style=bold color="green" fontcolor="orange"] -"AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold] -"AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold] -"AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold] -"AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold] -"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_start_0" [ style = bold] -"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold] -"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold] -"AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold 
color="green" fontcolor="black"] -"AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold] -"AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold] -"AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold] -"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_start_0" [ style = bold] -"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold] -"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"AdminLvm:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminLvm:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"AdminLvm:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:1_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminLvm:1_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold] +"AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" -> 
"AdminClone_confirmed-post_notify_running_0" [ style = bold] +"AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold] +"AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold] +"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_start_0" [ style = bold] +"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold] +"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold] +"AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold] +"AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold] +"AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold] +"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_start_0" [ style = bold] +"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold] +"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"AdminLvm:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:0_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:0_start_0 
orestes-corosync.nevis.columbia.edu" -> "AdminLvm:0_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"AdminLvm:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:1_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminLvm:1_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] +"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] "ClusterIP:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] "ClusterIP:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "ClusterIP:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold] "ClusterIP:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "ClusterIPLocal:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] @@ -259,74 +259,74 @@ "ExportsGroup:1_start_0" -> "ExportWWW:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] "ExportsGroup:1_start_0" -> "ExportsGroup:1_running_0" [ style = bold] "ExportsGroup:1_start_0" [ style=bold color="green" fontcolor="orange"] -"FSMail:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSMail:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSMail:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSMail:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSMail:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSMail:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:0_monitor_0 
hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSUsrNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSUsrNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSVarNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSVarNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSVirtualMachines:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:0_monitor_20000 
hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSVirtualMachines:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSWork:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] -"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] -"FSWork:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] -"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] -"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSMail:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSMail:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSMail:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ 
style=bold color="green" fontcolor="black"] +"FSMail:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSMail:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSMail:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] +"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSUsrNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSUsrNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] +"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSVarNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSVarNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:1_monitor_20000 
hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] +"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSVirtualMachines:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSVirtualMachines:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] +"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSWork:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold] +"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold] +"FSWork:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] +"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold] 
+"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"] "FilesystemClone_running_0" -> "CronAmbientTemperature_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "FilesystemClone_running_0" -> "CupsClone_start_0" [ style = bold] "FilesystemClone_running_0" -> "DhcpGroup_start_0" [ style = bold] @@ -339,32 +339,32 @@ "FilesystemClone_start_0" -> "FilesystemGroup:0_start_0" [ style = bold] "FilesystemClone_start_0" -> "FilesystemGroup:1_start_0" [ style = bold] "FilesystemClone_start_0" [ style=bold color="green" fontcolor="orange"] -"FilesystemGroup:0_running_0" -> "CupsGroup:0_start_0" [ style = bold] -"FilesystemGroup:0_running_0" -> "ExportsGroup:0_start_0" [ style = bold] +"FilesystemGroup:0_running_0" -> "CupsGroup:1_start_0" [ style = bold] +"FilesystemGroup:0_running_0" -> "ExportsGroup:1_start_0" [ style = bold] "FilesystemGroup:0_running_0" -> "FilesystemClone_running_0" [ style = bold] -"FilesystemGroup:0_running_0" -> "LibvirtdGroup:0_start_0" [ style = bold] -"FilesystemGroup:0_running_0" -> "TftpGroup:0_start_0" [ style = bold] +"FilesystemGroup:0_running_0" -> "LibvirtdGroup:1_start_0" [ style = bold] +"FilesystemGroup:0_running_0" -> "TftpGroup:1_start_0" [ style = bold] "FilesystemGroup:0_running_0" [ style=bold color="green" fontcolor="orange"] -"FilesystemGroup:0_start_0" -> "AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:0_start_0" -> "FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:0_start_0" -> "FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:0_start_0" -> "FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:0_start_0" -> "FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:0_start_0" -> "FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:0_start_0" -> "FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] "FilesystemGroup:0_start_0" -> "FilesystemGroup:0_running_0" [ style = bold] "FilesystemGroup:0_start_0" [ style=bold color="green" fontcolor="orange"] -"FilesystemGroup:1_running_0" -> "CupsGroup:1_start_0" [ style = bold] -"FilesystemGroup:1_running_0" -> "ExportsGroup:1_start_0" [ style = bold] +"FilesystemGroup:1_running_0" -> "CupsGroup:0_start_0" [ style = bold] +"FilesystemGroup:1_running_0" -> "ExportsGroup:0_start_0" [ style = bold] "FilesystemGroup:1_running_0" -> "FilesystemClone_running_0" [ style = bold] -"FilesystemGroup:1_running_0" -> "LibvirtdGroup:1_start_0" [ style = bold] -"FilesystemGroup:1_running_0" -> "TftpGroup:1_start_0" [ style = bold] +"FilesystemGroup:1_running_0" -> "LibvirtdGroup:0_start_0" [ style = bold] +"FilesystemGroup:1_running_0" -> "TftpGroup:0_start_0" [ style = bold] "FilesystemGroup:1_running_0" [ style=bold color="green" fontcolor="orange"] -"FilesystemGroup:1_start_0" -> 
"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:1_start_0" -> "FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:1_start_0" -> "FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:1_start_0" -> "FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:1_start_0" -> "FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] -"FilesystemGroup:1_start_0" -> "FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] +"FilesystemGroup:1_start_0" -> "FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold] "FilesystemGroup:1_start_0" -> "FilesystemGroup:1_running_0" [ style = bold] "FilesystemGroup:1_start_0" [ style=bold color="green" fontcolor="orange"] "IPClone_running_0" [ style=bold color="green" fontcolor="orange"] diff --git a/cts/scheduler/dot/timeout-by-node.dot b/cts/scheduler/dot/timeout-by-node.dot new file mode 100644 index 0000000..b4c0b97 --- /dev/null +++ b/cts/scheduler/dot/timeout-by-node.dot @@ -0,0 +1,40 @@ + digraph "g" { +"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"] +"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold] +"rsc1-clone_start_0" -> "rsc1:0_start_0 node2" [ style = bold] +"rsc1-clone_start_0" -> "rsc1:1_start_0 node3" [ style = bold] +"rsc1-clone_start_0" -> "rsc1:2_start_0 node4" [ style = bold] +"rsc1-clone_start_0" -> "rsc1:3_start_0 node5" [ style = bold] +"rsc1-clone_start_0" -> "rsc1:4_start_0 node1" [ style = bold] +"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"] +"rsc1:0_monitor_0 node2" -> "rsc1-clone_start_0" [ style = bold] +"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1:0_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"rsc1:0_start_0 node2" -> "rsc1-clone_running_0" [ style = bold] +"rsc1:0_start_0 node2" -> "rsc1:0_monitor_10000 node2" [ style = bold] +"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black"] +"rsc1:1_monitor_0 node3" -> "rsc1-clone_start_0" [ style = bold] +"rsc1:1_monitor_0 node3" [ style=bold color="green" fontcolor="black"] +"rsc1:1_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"rsc1:1_start_0 node3" -> "rsc1-clone_running_0" [ style = bold] +"rsc1:1_start_0 node3" -> "rsc1:1_monitor_10000 node3" [ style = bold] +"rsc1:1_start_0 node3" [ style=bold color="green" fontcolor="black"] +"rsc1:2_monitor_0 node4" -> "rsc1-clone_start_0" [ style = bold] +"rsc1:2_monitor_0 node4" [ style=bold color="green" fontcolor="black"] +"rsc1:2_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"rsc1:2_start_0 node4" -> "rsc1-clone_running_0" [ style = bold] +"rsc1:2_start_0 node4" -> "rsc1:2_monitor_10000 node4" [ style = bold] +"rsc1:2_start_0 node4" [ style=bold color="green" fontcolor="black"] +"rsc1:3_monitor_0 node5" -> "rsc1-clone_start_0" [ style = bold] 
+"rsc1:3_monitor_0 node5" [ style=bold color="green" fontcolor="black"] +"rsc1:3_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"rsc1:3_start_0 node5" -> "rsc1-clone_running_0" [ style = bold] +"rsc1:3_start_0 node5" -> "rsc1:3_monitor_10000 node5" [ style = bold] +"rsc1:3_start_0 node5" [ style=bold color="green" fontcolor="black"] +"rsc1:4_monitor_0 node1" -> "rsc1-clone_start_0" [ style = bold] +"rsc1:4_monitor_0 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:4_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"rsc1:4_start_0 node1" -> "rsc1-clone_running_0" [ style = bold] +"rsc1:4_start_0 node1" -> "rsc1:4_monitor_10000 node1" [ style = bold] +"rsc1:4_start_0 node1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/unfence-definition.dot b/cts/scheduler/dot/unfence-definition.dot index 6b67392..b2ec3d5 100644 --- a/cts/scheduler/dot/unfence-definition.dot +++ b/cts/scheduler/dot/unfence-definition.dot @@ -20,11 +20,13 @@ "clvmd:1_start_0 virt-2" [ style=bold color="green" fontcolor="black"] "clvmd:2_monitor_0 virt-3" -> "clvmd-clone_start_0" [ style = bold] "clvmd:2_monitor_0 virt-3" -> "clvmd-clone_stopped_0" [ style = bold] +"clvmd:2_monitor_0 virt-3" -> "clvmd_stop_0 virt-1" [ style = bold] "clvmd:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"] "clvmd:2_start_0 virt-3" -> "clvmd-clone_running_0" [ style = bold] "clvmd:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"] "clvmd_start_0 virt-1" -> "clvmd-clone_running_0" [ style = bold] "clvmd_start_0 virt-1" -> "clvmd:1_start_0 virt-2" [ style = bold] +"clvmd_start_0 virt-1" -> "clvmd:2_start_0 virt-3" [ style = bold] "clvmd_start_0 virt-1" [ style=bold color="green" fontcolor="black"] "clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold] "clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] @@ -44,12 +46,14 @@ "dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] "dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold] "dlm:2_monitor_0 virt-3" -> "dlm-clone_stopped_0" [ style = bold] +"dlm:2_monitor_0 virt-3" -> "dlm_stop_0 virt-1" [ style = bold] "dlm:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"] "dlm:2_start_0 virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] "dlm:2_start_0 virt-3" -> "dlm-clone_running_0" [ style = bold] "dlm:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"] "dlm_start_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] "dlm_start_0 virt-1" -> "dlm-clone_running_0" [ style = bold] +"dlm_start_0 virt-1" -> "dlm:2_start_0 virt-3" [ style = bold] "dlm_start_0 virt-1" [ style=bold color="green" fontcolor="black"] "dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold] "dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold] diff --git a/cts/scheduler/dot/unfence-parameters.dot b/cts/scheduler/dot/unfence-parameters.dot index d03b227..d5646c9 100644 --- a/cts/scheduler/dot/unfence-parameters.dot +++ b/cts/scheduler/dot/unfence-parameters.dot @@ -20,11 +20,13 @@ "clvmd:1_start_0 virt-2" [ style=bold color="green" fontcolor="black"] "clvmd:2_monitor_0 virt-3" -> "clvmd-clone_start_0" [ style = bold] "clvmd:2_monitor_0 virt-3" -> "clvmd-clone_stopped_0" [ style = bold] +"clvmd:2_monitor_0 virt-3" -> "clvmd_stop_0 virt-1" [ style = bold] "clvmd:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"] "clvmd:2_start_0 virt-3" -> "clvmd-clone_running_0" [ style = bold] "clvmd:2_start_0 virt-3" [ style=bold color="green" 
fontcolor="black"] "clvmd_start_0 virt-1" -> "clvmd-clone_running_0" [ style = bold] "clvmd_start_0 virt-1" -> "clvmd:1_start_0 virt-2" [ style = bold] +"clvmd_start_0 virt-1" -> "clvmd:2_start_0 virt-3" [ style = bold] "clvmd_start_0 virt-1" [ style=bold color="green" fontcolor="black"] "clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold] "clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] @@ -44,12 +46,14 @@ "dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] "dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold] "dlm:2_monitor_0 virt-3" -> "dlm-clone_stopped_0" [ style = bold] +"dlm:2_monitor_0 virt-3" -> "dlm_stop_0 virt-1" [ style = bold] "dlm:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"] "dlm:2_start_0 virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] "dlm:2_start_0 virt-3" -> "dlm-clone_running_0" [ style = bold] "dlm:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"] "dlm_start_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] "dlm_start_0 virt-1" -> "dlm-clone_running_0" [ style = bold] +"dlm_start_0 virt-1" -> "dlm:2_start_0 virt-3" [ style = bold] "dlm_start_0 virt-1" [ style=bold color="green" fontcolor="black"] "dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold] "dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold] diff --git a/cts/scheduler/dot/utilization-complex.dot b/cts/scheduler/dot/utilization-complex.dot index cccda24..340880d 100644 --- a/cts/scheduler/dot/utilization-complex.dot +++ b/cts/scheduler/dot/utilization-complex.dot @@ -151,6 +151,7 @@ "httpd_start_0 httpd-bundle-0" -> "httpd-bundle-clone_running_0" [ style = bold] "httpd_start_0 httpd-bundle-0" -> "httpd_monitor_15000 httpd-bundle-0" [ style = bold] "httpd_start_0 httpd-bundle-0" -> "httpd_start_0 httpd-bundle-1" [ style = dashed] +"httpd_start_0 httpd-bundle-0" -> "httpd_start_0 httpd-bundle-2" [ style = dashed] "httpd_start_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"] "httpd_start_0 httpd-bundle-1" -> "httpd-bundle-clone_running_0" [ style = dashed] "httpd_start_0 httpd-bundle-1" -> "httpd_monitor_15000 httpd-bundle-1" [ style = dashed] diff --git a/cts/scheduler/exp/bug-1822.exp b/cts/scheduler/exp/bug-1822.exp index 1206c97..9960c68 100644 --- a/cts/scheduler/exp/bug-1822.exp +++ b/cts/scheduler/exp/bug-1822.exp @@ -60,7 +60,7 @@ - + @@ -89,7 +89,7 @@ - + diff --git a/cts/scheduler/exp/bug-lf-2422.exp b/cts/scheduler/exp/bug-lf-2422.exp index 212493e..4728c24 100644 --- a/cts/scheduler/exp/bug-lf-2422.exp +++ b/cts/scheduler/exp/bug-lf-2422.exp @@ -303,6 +303,12 @@ + + + + + + @@ -319,6 +325,9 @@ + + + diff --git a/cts/scheduler/exp/bundle-interleave-start.exp b/cts/scheduler/exp/bundle-interleave-start.exp index e676b1b..4f726cd 100644 --- a/cts/scheduler/exp/bundle-interleave-start.exp +++ b/cts/scheduler/exp/bundle-interleave-start.exp @@ -1,42 +1,73 @@ - - - + + + - + - + + + + - - - + + + - + - + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -46,13 +77,13 @@ - + - + - + @@ -65,75 +96,99 @@ - + - + - + - - - + + + - + - + - + - - - + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + @@ -188,7 +243,7 @@ - + @@ -201,7 +256,7 @@ - + @@ -235,7 +290,7 @@ - + @@ -244,7 +299,7 @@ - + @@ -253,7 +308,7 @@ - + @@ -262,7 +317,7 @@ - + @@ -271,7 +326,7 @@ - + @@ -280,7 +335,7 @@ - + @@ -293,7 +348,7 @@ - + @@ -321,7 +376,7 @@ - + @@ -334,7 +389,7 @@ - + @@ -347,7 +402,7 @@ - + @@ -360,7 +415,7 @@ - + @@ -373,7 +428,7 @@ - 
diff --git a/cts/scheduler/exp/bundle-nested-colocation.exp b/cts/scheduler/exp/bundle-nested-colocation.exp index 025699c..ec7a71f 100644
diff --git a/cts/scheduler/exp/bundle-order-fencing.exp b/cts/scheduler/exp/bundle-order-fencing.exp index 3149204..a47bd4f 100644
diff --git a/cts/scheduler/exp/bundle-order-startup-clone-2.exp b/cts/scheduler/exp/bundle-order-startup-clone-2.exp index 8752185..e4a35c7 100644
diff --git a/cts/scheduler/exp/bundle-order-stop-on-remote.exp b/cts/scheduler/exp/bundle-order-stop-on-remote.exp index 7e23dcc..11ec557 100644
diff --git a/cts/scheduler/exp/bundle-probe-remotes.exp b/cts/scheduler/exp/bundle-probe-remotes.exp index b1b14db..41a6cf1 100644
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-1.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-1.exp new file mode 100644 index 0000000..bb2aee1
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-2.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-2.exp new file mode 100644 index 0000000..bb2aee1
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-3.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-3.exp new file mode 100644 index 0000000..7febd99
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-4.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-4.exp new file mode 100644 index 0000000..7febd99
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-5.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-5.exp new file mode 100644 index 0000000..d5861ab
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-6.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-6.exp new file mode 100644 index 0000000..d5861ab
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-1.exp b/cts/scheduler/exp/bundle-promoted-colocation-1.exp new file mode 100644 index 0000000..8d7ea7a
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-2.exp b/cts/scheduler/exp/bundle-promoted-colocation-2.exp new file mode 100644 index 0000000..8d7ea7a
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-3.exp b/cts/scheduler/exp/bundle-promoted-colocation-3.exp new file mode 100644 index 0000000..1963bbb
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-4.exp b/cts/scheduler/exp/bundle-promoted-colocation-4.exp new file mode 100644 index 0000000..1963bbb
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-5.exp b/cts/scheduler/exp/bundle-promoted-colocation-5.exp new file mode 100644 index 0000000..d3c6df3
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-6.exp b/cts/scheduler/exp/bundle-promoted-colocation-6.exp new file mode 100644 index 0000000..d3c6df3
diff --git a/cts/scheduler/exp/bundle-promoted-location-1.exp b/cts/scheduler/exp/bundle-promoted-location-1.exp new file mode 100644 index 0000000..56e315f
diff --git a/cts/scheduler/exp/bundle-promoted-location-2.exp b/cts/scheduler/exp/bundle-promoted-location-2.exp new file mode 100644 index 0000000..cbb74ba
diff --git a/cts/scheduler/exp/bundle-promoted-location-3.exp b/cts/scheduler/exp/bundle-promoted-location-3.exp new file mode 100644 index 0000000..56e315f
diff --git a/cts/scheduler/exp/bundle-promoted-location-4.exp b/cts/scheduler/exp/bundle-promoted-location-4.exp new file mode 100644 index 0000000..56e315f
diff --git a/cts/scheduler/exp/bundle-promoted-location-5.exp b/cts/scheduler/exp/bundle-promoted-location-5.exp new file mode 100644 index 0000000..56e315f
diff --git a/cts/scheduler/exp/bundle-promoted-location-6.exp b/cts/scheduler/exp/bundle-promoted-location-6.exp new file mode 100644 index 0000000..07a6a2d
diff --git a/cts/scheduler/exp/bundle-replicas-change.exp b/cts/scheduler/exp/bundle-replicas-change.exp index b14dbf2..ec89d94 100644
diff --git a/cts/scheduler/exp/cancel-behind-moving-remote.exp b/cts/scheduler/exp/cancel-behind-moving-remote.exp index 17759cb..91651ba 100644
diff --git a/cts/scheduler/exp/clone-anon-failcount.exp b/cts/scheduler/exp/clone-anon-failcount.exp index 05312c2..a48f69b 100644
diff --git a/cts/scheduler/exp/clone-order-16instances.exp b/cts/scheduler/exp/clone-order-16instances.exp index 9d20ae1..b06826b 100644
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-1.exp b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp new file mode 100644 index 0000000..670a823
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-10.exp b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp new file mode 100644 index 0000000..27b8b70
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-11.exp b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp new file mode 100644 index 0000000..40cf1f6
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-12.exp b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp new file mode 100644 index 0000000..919e6b2
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-2.exp b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp new file mode 100644 index 0000000..84b1e1b
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-3.exp b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp new file mode 100644 index 0000000..6b6ed07
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp new file mode 100644 index 0000000..670a823
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp new file mode 100644 index 0000000..84b1e1b
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp new file mode 100644 index 0000000..6b6ed07
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp new file mode 100644 index 0000000..870ed54
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-8.exp b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp new file mode 100644 index 0000000..763a2f0
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-9.exp b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp new file mode 100644 index 0000000..e249bc7
diff --git a/cts/scheduler/exp/coloc-with-inner-group-member.exp b/cts/scheduler/exp/coloc-with-inner-group-member.exp new file mode 100644 index 0000000..bb8f779
diff --git a/cts/scheduler/exp/group-anticolocation-2.exp b/cts/scheduler/exp/group-anticolocation-2.exp new file mode 100644 index 0000000..4e57e18
diff --git a/cts/scheduler/exp/group-anticolocation-3.exp b/cts/scheduler/exp/group-anticolocation-3.exp new file mode 100644 index 0000000..066b3bd
diff --git a/cts/scheduler/exp/group-anticolocation-4.exp b/cts/scheduler/exp/group-anticolocation-4.exp new file mode 100644 index 0000000..4e57e18
diff --git a/cts/scheduler/exp/group-anticolocation-5.exp b/cts/scheduler/exp/group-anticolocation-5.exp new file mode 100644 index 0000000..2394b4e
diff --git a/cts/scheduler/exp/group-anticolocation.exp b/cts/scheduler/exp/group-anticolocation.exp index 4e57e18..5a37559 100644
diff --git a/cts/scheduler/exp/inc4.exp b/cts/scheduler/exp/inc4.exp index add43f8..7b1d121 100644
diff --git a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp index 351aec1..5eeb3d4 100644
diff --git a/cts/scheduler/exp/node-pending-timeout.exp b/cts/scheduler/exp/node-pending-timeout.exp new file mode 100644 index 0000000..e94812f
diff --git a/cts/scheduler/exp/pending-node-no-uname.exp b/cts/scheduler/exp/pending-node-no-uname.exp new file mode 100644 index 0000000..2c45756
diff --git a/cts/scheduler/exp/promoted-failed-demote-2.exp b/cts/scheduler/exp/promoted-failed-demote-2.exp index 02b9250..81ed8df 100644
diff --git a/cts/scheduler/exp/promoted-failed-demote.exp b/cts/scheduler/exp/promoted-failed-demote.exp index e4fc706..69e6b39 100644
diff --git a/cts/scheduler/exp/promoted-ordering.exp b/cts/scheduler/exp/promoted-ordering.exp index 1df608d..430fbe6 100644
diff --git a/cts/scheduler/exp/promoted-probed-score.exp b/cts/scheduler/exp/promoted-probed-score.exp index 3db546c..0952700 100644
diff --git a/cts/scheduler/exp/shutdown-lock-expiration.exp b/cts/scheduler/exp/shutdown-lock-expiration.exp index 465f12b..9941333 100644
diff --git
a/cts/scheduler/exp/timeout-by-node.exp b/cts/scheduler/exp/timeout-by-node.exp new file mode 100644 index 0000000..19d1afc --- /dev/null +++ b/cts/scheduler/exp/timeout-by-node.exp @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/unfence-definition.exp b/cts/scheduler/exp/unfence-definition.exp index 6a098ed..308f638 100644 --- a/cts/scheduler/exp/unfence-definition.exp +++ b/cts/scheduler/exp/unfence-definition.exp @@ -89,6 +89,9 @@ + + + @@ -108,6 +111,9 @@ + + + @@ -228,6 +234,9 @@ + + + @@ -275,6 +284,9 @@ + + + diff --git a/cts/scheduler/exp/unfence-parameters.exp b/cts/scheduler/exp/unfence-parameters.exp index 268bf00..0b76e26 100644 --- a/cts/scheduler/exp/unfence-parameters.exp +++ b/cts/scheduler/exp/unfence-parameters.exp @@ -73,6 +73,9 @@ + + + @@ -92,6 +95,9 @@ + + + @@ -212,6 +218,9 @@ + + + @@ -259,6 +268,9 @@ + + + diff --git a/cts/scheduler/scores/594.scores b/cts/scheduler/scores/594.scores index 5e99750..96c8f44 100644 --- a/cts/scheduler/scores/594.scores +++ b/cts/scheduler/scores/594.scores @@ -21,8 +21,11 @@ pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev1: 1 pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev3: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY +pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY pcmk__primitive_assign: rsc_hadev1 allocation score on hadev1: 100 pcmk__primitive_assign: rsc_hadev1 allocation score on hadev2: 0 pcmk__primitive_assign: rsc_hadev1 allocation score on hadev3: 0 diff --git a/cts/scheduler/scores/a-promote-then-b-migrate.scores b/cts/scheduler/scores/a-promote-then-b-migrate.scores index c94077d..02674be 100644 --- a/cts/scheduler/scores/a-promote-then-b-migrate.scores +++ b/cts/scheduler/scores/a-promote-then-b-migrate.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: rsc1:0 allocation score on node1: 1 pcmk__clone_assign: rsc1:0 allocation score on node2: 0 pcmk__clone_assign: rsc1:1 allocation score on node1: 0 pcmk__clone_assign: rsc1:1 allocation score on node2: 1 +pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:0 allocation score on node1: 1 +pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:0 allocation score on node2: 0 pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node2: 1 diff --git a/cts/scheduler/scores/asymmetric.scores b/cts/scheduler/scores/asymmetric.scores index 69310bf..93ed82f 100644 --- a/cts/scheduler/scores/asymmetric.scores +++ b/cts/scheduler/scores/asymmetric.scores @@ -11,4 +11,3 @@ 
pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 allocation score pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 allocation score on puma3: -INFINITY pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 allocation score on puma1: 0 pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 allocation score on puma3: 200 -pcmk__primitive_assign: vpool_ip_poolA allocation score on puma3: -INFINITY diff --git a/cts/scheduler/scores/bug-1822.scores b/cts/scheduler/scores/bug-1822.scores index 82191d1..0a9056b 100644 --- a/cts/scheduler/scores/bug-1822.scores +++ b/cts/scheduler/scores/bug-1822.scores @@ -1,5 +1,5 @@ -ms-sf_group:0 promotion score on process2b: -INFINITY +ms-sf_group:0 promotion score on process2b: 49 ms-sf_group:1 promotion score on none: 0 pcmk__clone_assign: ms-sf allocation score on process1a: 0 pcmk__clone_assign: ms-sf allocation score on process2b: 0 diff --git a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores index e698b14..d79208c 100644 --- a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores +++ b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores @@ -5,3 +5,4 @@ pcmk__clone_assign: clone1 allocation score on fc16-builder: 0 pcmk__clone_assign: clone2 allocation score on fc16-builder: 0 pcmk__primitive_assign: ClusterIP2:0 allocation score on fc16-builder: 1 pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY +pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY diff --git a/cts/scheduler/scores/bug-5143-ms-shuffle.scores b/cts/scheduler/scores/bug-5143-ms-shuffle.scores index 86a1a78..87bb4e4 100644 --- a/cts/scheduler/scores/bug-5143-ms-shuffle.scores +++ b/cts/scheduler/scores/bug-5143-ms-shuffle.scores @@ -173,7 +173,7 @@ pcmk__primitive_assign: clvmd:0 allocation score on hex-1: 4000 pcmk__primitive_assign: clvmd:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: clvmd:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: clvmd:1 allocation score on hex-1: -INFINITY -pcmk__primitive_assign: clvmd:1 allocation score on hex-2: 4000 +pcmk__primitive_assign: clvmd:1 allocation score on hex-2: 6000 pcmk__primitive_assign: clvmd:1 allocation score on hex-3: -INFINITY pcmk__primitive_assign: clvmd:2 allocation score on hex-1: -INFINITY pcmk__primitive_assign: clvmd:2 allocation score on hex-2: -INFINITY @@ -182,7 +182,7 @@ pcmk__primitive_assign: dlm:0 allocation score on hex-1: 5000 pcmk__primitive_assign: dlm:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-3: 0 pcmk__primitive_assign: dlm:1 allocation score on hex-1: 0 -pcmk__primitive_assign: dlm:1 allocation score on hex-2: 5000 +pcmk__primitive_assign: dlm:1 allocation score on hex-2: 7000 pcmk__primitive_assign: dlm:1 allocation score on hex-3: 0 pcmk__primitive_assign: dlm:2 allocation score on hex-1: -INFINITY pcmk__primitive_assign: dlm:2 allocation score on hex-2: -INFINITY @@ -227,12 +227,12 @@ pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-1: 1000 pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-1: -INFINITY -pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-2: 1000 +pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-2: 3000 pcmk__primitive_assign: fs-ocfs-1:1 
allocation score on hex-3: -INFINITY pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-1: -INFINITY pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-2: -INFINITY pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-3: 1000 -pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-1: 1000 +pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-1: 2000 pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: fs-ocfs-2:1 allocation score on hex-1: -INFINITY @@ -254,7 +254,7 @@ pcmk__primitive_assign: o2cb:0 allocation score on hex-1: 3000 pcmk__primitive_assign: o2cb:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on hex-1: -INFINITY -pcmk__primitive_assign: o2cb:1 allocation score on hex-2: 3000 +pcmk__primitive_assign: o2cb:1 allocation score on hex-2: 5000 pcmk__primitive_assign: o2cb:1 allocation score on hex-3: -INFINITY pcmk__primitive_assign: o2cb:2 allocation score on hex-1: -INFINITY pcmk__primitive_assign: o2cb:2 allocation score on hex-2: -INFINITY @@ -266,7 +266,7 @@ pcmk__primitive_assign: vg1:0 allocation score on hex-1: 2000 pcmk__primitive_assign: vg1:0 allocation score on hex-2: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on hex-1: -INFINITY -pcmk__primitive_assign: vg1:1 allocation score on hex-2: 2000 +pcmk__primitive_assign: vg1:1 allocation score on hex-2: 4000 pcmk__primitive_assign: vg1:1 allocation score on hex-3: -INFINITY pcmk__primitive_assign: vg1:2 allocation score on hex-1: -INFINITY pcmk__primitive_assign: vg1:2 allocation score on hex-2: -INFINITY diff --git a/cts/scheduler/scores/bug-5186-partial-migrate.scores b/cts/scheduler/scores/bug-5186-partial-migrate.scores index 93854f1..a962738 100644 --- a/cts/scheduler/scores/bug-5186-partial-migrate.scores +++ b/cts/scheduler/scores/bug-5186-partial-migrate.scores @@ -67,7 +67,7 @@ pcmk__primitive_assign: prmDiskd1:0 allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmDiskd1:0 allocation score on bl460g1n8: -INFINITY pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n6: INFINITY pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n7: -INFINITY -pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n8: 0 +pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n8: 200 pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n6: -INFINITY pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n8: INFINITY @@ -76,7 +76,7 @@ pcmk__primitive_assign: prmDiskd2:0 allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmDiskd2:0 allocation score on bl460g1n8: -INFINITY pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n6: INFINITY pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n7: -INFINITY -pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n8: 0 +pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n8: 200 pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n6: -INFINITY pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n8: INFINITY @@ -88,7 +88,7 @@ pcmk__primitive_assign: prmPing:0 
allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmPing:0 allocation score on bl460g1n8: -INFINITY pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n6: INFINITY pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n7: -INFINITY -pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n8: 0 +pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n8: 200 pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n6: -INFINITY pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n7: -INFINITY pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n8: INFINITY diff --git a/cts/scheduler/scores/bug-cl-5168.scores b/cts/scheduler/scores/bug-cl-5168.scores index 916fecb..59dee5d 100644 --- a/cts/scheduler/scores/bug-cl-5168.scores +++ b/cts/scheduler/scores/bug-cl-5168.scores @@ -200,7 +200,7 @@ pcmk__primitive_assign: drbd-r1:0 allocation score on hex-2: 1001 pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: -INFINITY pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: INFINITY pcmk__primitive_assign: drbd-r1:1 allocation score on hex-1: -INFINITY -pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: 0 +pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: -INFINITY pcmk__primitive_assign: drbd-r1:1 allocation score on hex-3: INFINITY pcmk__primitive_assign: dummy1 allocation score on hex-1: -INFINITY pcmk__primitive_assign: dummy1 allocation score on hex-2: -INFINITY diff --git a/cts/scheduler/scores/bug-lf-2106.scores b/cts/scheduler/scores/bug-lf-2106.scores index b512c6e..30e175f 100644 --- a/cts/scheduler/scores/bug-lf-2106.scores +++ b/cts/scheduler/scores/bug-lf-2106.scores @@ -64,45 +64,45 @@ pcmk__group_assign: ssh-ip2 allocation score on cl-virt-2: 100 pcmk__primitive_assign: apcstonith allocation score on cl-virt-1: 100 pcmk__primitive_assign: apcstonith allocation score on cl-virt-2: 0 pcmk__primitive_assign: bugtrack allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: bugtrack allocation score on cl-virt-2: 275 -pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-1: 100 +pcmk__primitive_assign: bugtrack allocation score on cl-virt-2: 375 +pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-1: 150 pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-2: -INFINITY -pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-2: 175 +pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-1: 50 +pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-2: 275 pcmk__primitive_assign: drbd-infotos:0 allocation score on cl-virt-1: 100 pcmk__primitive_assign: drbd-infotos:0 allocation score on cl-virt-2: -INFINITY pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-2: 175 -pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-1: 100 +pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-2: 325 +pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-1: 150 pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-2: -INFINITY -pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-2: 175 +pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-1: 50 +pcmk__primitive_assign: drbd-itwiki:1 allocation score on 
cl-virt-2: 275 pcmk__primitive_assign: drbd-medomus-cvs:0 allocation score on cl-virt-1: 100 pcmk__primitive_assign: drbd-medomus-cvs:0 allocation score on cl-virt-2: -INFINITY pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-2: 175 -pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-1: 100 +pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-2: 325 +pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-1: 150 pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-2: -INFINITY -pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-2: 175 +pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-1: 50 +pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-2: 275 pcmk__primitive_assign: drbd-smsprod2:0 allocation score on cl-virt-1: 100 pcmk__primitive_assign: drbd-smsprod2:0 allocation score on cl-virt-2: -INFINITY pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-1: 0 -pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-2: 175 +pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-2: 325 pcmk__primitive_assign: infotos allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: infotos allocation score on cl-virt-2: 325 +pcmk__primitive_assign: infotos allocation score on cl-virt-2: 475 pcmk__primitive_assign: itwiki allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: itwiki allocation score on cl-virt-2: 275 +pcmk__primitive_assign: itwiki allocation score on cl-virt-2: 375 pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-2: 325 +pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-2: 475 pcmk__primitive_assign: pingd:0 allocation score on cl-virt-1: 100 pcmk__primitive_assign: pingd:0 allocation score on cl-virt-2: 0 pcmk__primitive_assign: pingd:1 allocation score on cl-virt-1: -INFINITY pcmk__primitive_assign: pingd:1 allocation score on cl-virt-2: 100 pcmk__primitive_assign: servsyslog allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: servsyslog allocation score on cl-virt-2: 275 +pcmk__primitive_assign: servsyslog allocation score on cl-virt-2: 375 pcmk__primitive_assign: smsprod2 allocation score on cl-virt-1: -INFINITY -pcmk__primitive_assign: smsprod2 allocation score on cl-virt-2: 325 +pcmk__primitive_assign: smsprod2 allocation score on cl-virt-2: 475 pcmk__primitive_assign: ssh-bin allocation score on cl-virt-1: -INFINITY pcmk__primitive_assign: ssh-bin allocation score on cl-virt-2: 100 pcmk__primitive_assign: ssh-ip1 allocation score on cl-virt-1: 0 diff --git a/cts/scheduler/scores/bug-lf-2153.scores b/cts/scheduler/scores/bug-lf-2153.scores index d2492f2..afad730 100644 --- a/cts/scheduler/scores/bug-lf-2153.scores +++ b/cts/scheduler/scores/bug-lf-2153.scores @@ -29,7 +29,7 @@ pcmk__group_assign: rg_iscsivg01 allocation score on alice: 100 pcmk__group_assign: rg_iscsivg01 allocation score on bob: 0 pcmk__primitive_assign: res_drbd_iscsivg01:0 allocation score on alice: -INFINITY pcmk__primitive_assign: res_drbd_iscsivg01:0 allocation score on bob: -INFINITY -pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on alice: 300 +pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on alice: 
1800 pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on bob: -INFINITY pcmk__primitive_assign: res_ip_alicebob01 allocation score on alice: 400 pcmk__primitive_assign: res_ip_alicebob01 allocation score on bob: -INFINITY @@ -39,7 +39,7 @@ pcmk__primitive_assign: res_lu_iscsivg01_lun2 allocation score on alice: 600 pcmk__primitive_assign: res_lu_iscsivg01_lun2 allocation score on bob: -INFINITY pcmk__primitive_assign: res_lvm_iscsivg01 allocation score on alice: 1200 pcmk__primitive_assign: res_lvm_iscsivg01 allocation score on bob: -INFINITY -pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on alice: 1800 +pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on alice: 3300 pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on bob: -INFINITY pcmk__primitive_assign: res_portblock_iscsivg01_unblock allocation score on alice: 200 pcmk__primitive_assign: res_portblock_iscsivg01_unblock allocation score on bob: -INFINITY @@ -47,7 +47,7 @@ pcmk__primitive_assign: res_target_iscsivg01 allocation score on alice: 1000 pcmk__primitive_assign: res_target_iscsivg01 allocation score on bob: -INFINITY pcmk__primitive_assign: res_tgtd:0 allocation score on alice: -INFINITY pcmk__primitive_assign: res_tgtd:0 allocation score on bob: -INFINITY -pcmk__primitive_assign: res_tgtd:1 allocation score on alice: 200 +pcmk__primitive_assign: res_tgtd:1 allocation score on alice: 1700 pcmk__primitive_assign: res_tgtd:1 allocation score on bob: -INFINITY res_drbd_iscsivg01:0 promotion score on none: 0 res_drbd_iscsivg01:1 promotion score on alice: 3100 diff --git a/cts/scheduler/scores/bug-lf-2171.scores b/cts/scheduler/scores/bug-lf-2171.scores index 7d2bdd4..14cc28a 100644 --- a/cts/scheduler/scores/bug-lf-2171.scores +++ b/cts/scheduler/scores/bug-lf-2171.scores @@ -12,8 +12,12 @@ pcmk__group_assign: res_Dummy2 allocation score on xenserver2: 0 pcmk__group_assign: res_Dummy3 allocation score on xenserver1: 200 pcmk__group_assign: res_Dummy3 allocation score on xenserver2: 0 pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY +pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY pcmk__primitive_assign: res_Dummy2 allocation score on xenserver1: 200 pcmk__primitive_assign: res_Dummy2 allocation score on xenserver2: 0 diff --git a/cts/scheduler/scores/bug-lf-2422.scores b/cts/scheduler/scores/bug-lf-2422.scores index 99ff12e..77a284d 100644 --- a/cts/scheduler/scores/bug-lf-2422.scores +++ b/cts/scheduler/scores/bug-lf-2422.scores @@ -248,20 +248,36 @@ pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-3: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score 
on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY +pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-1: 0 pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-2: 0 diff --git a/cts/scheduler/scores/bug-lf-2453.scores b/cts/scheduler/scores/bug-lf-2453.scores index eaee72d..3ef0f6d 100644 --- a/cts/scheduler/scores/bug-lf-2453.scores +++ b/cts/scheduler/scores/bug-lf-2453.scores @@ -17,6 +17,10 @@ pcmk__primitive_assign: DummyResource:1 allocation score on domu1: -INFINITY pcmk__primitive_assign: DummyResource:1 allocation score on domu2: INFINITY pcmk__primitive_assign: PrimitiveResource1 allocation score on domu1: INFINITY pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY +pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY diff --git a/cts/scheduler/scores/bug-lf-2551.scores b/cts/scheduler/scores/bug-lf-2551.scores index d9cb9f5..2dc23a8 100644 --- a/cts/scheduler/scores/bug-lf-2551.scores +++ b/cts/scheduler/scores/bug-lf-2551.scores @@ -228,14 +228,14 @@ pcmk__group_assign: vg1:3 allocation score on hex-7: -INFINITY pcmk__group_assign: vg1:3 
allocation score on hex-8: -INFINITY pcmk__group_assign: vg1:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: clvm:0 allocation score on hex-0: -INFINITY -pcmk__primitive_assign: clvm:0 allocation score on hex-7: 4 +pcmk__primitive_assign: clvm:0 allocation score on hex-7: 20 pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: clvm:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: clvm:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: clvm:1 allocation score on hex-8: 4 +pcmk__primitive_assign: clvm:1 allocation score on hex-8: 20 pcmk__primitive_assign: clvm:1 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: clvm:2 allocation score on hex-0: 4 +pcmk__primitive_assign: clvm:2 allocation score on hex-0: 18 pcmk__primitive_assign: clvm:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: clvm:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:2 allocation score on hex-9: -INFINITY @@ -244,14 +244,14 @@ pcmk__primitive_assign: clvm:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: clvm:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: -INFINITY -pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: 3 +pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: 19 pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: cmirrord:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: cmirrord:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: 3 +pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: 19 pcmk__primitive_assign: cmirrord:1 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: cmirrord:2 allocation score on hex-0: 3 +pcmk__primitive_assign: cmirrord:2 allocation score on hex-0: 17 pcmk__primitive_assign: cmirrord:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: cmirrord:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:2 allocation score on hex-9: -INFINITY @@ -259,15 +259,15 @@ pcmk__primitive_assign: cmirrord:3 allocation score on hex-0: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: dlm:0 allocation score on hex-0: 0 -pcmk__primitive_assign: dlm:0 allocation score on hex-7: 6 -pcmk__primitive_assign: dlm:0 allocation score on hex-8: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-0: 14 +pcmk__primitive_assign: dlm:0 allocation score on hex-7: 22 +pcmk__primitive_assign: dlm:0 allocation score on hex-8: 16 pcmk__primitive_assign: dlm:0 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: dlm:1 allocation score on hex-0: 0 +pcmk__primitive_assign: dlm:1 allocation score on hex-0: 14 pcmk__primitive_assign: dlm:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: dlm:1 allocation score on hex-8: 6 +pcmk__primitive_assign: dlm:1 allocation score on hex-8: 22 pcmk__primitive_assign: dlm:1 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: dlm:2 allocation score on hex-0: 6 +pcmk__primitive_assign: dlm:2 
allocation score on hex-0: 20 pcmk__primitive_assign: dlm:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: dlm:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: dlm:2 allocation score on hex-9: -INFINITY @@ -284,14 +284,14 @@ pcmk__primitive_assign: fencing-sbd allocation score on hex-7: 0 pcmk__primitive_assign: fencing-sbd allocation score on hex-8: 0 pcmk__primitive_assign: fencing-sbd allocation score on hex-9: 1 pcmk__primitive_assign: o2cb:0 allocation score on hex-0: -INFINITY -pcmk__primitive_assign: o2cb:0 allocation score on hex-7: 5 +pcmk__primitive_assign: o2cb:0 allocation score on hex-7: 21 pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: o2cb:1 allocation score on hex-8: 5 +pcmk__primitive_assign: o2cb:1 allocation score on hex-8: 21 pcmk__primitive_assign: o2cb:1 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: o2cb:2 allocation score on hex-0: 5 +pcmk__primitive_assign: o2cb:2 allocation score on hex-0: 19 pcmk__primitive_assign: o2cb:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: o2cb:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:2 allocation score on hex-9: -INFINITY @@ -300,14 +300,14 @@ pcmk__primitive_assign: o2cb:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY -pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: 1 +pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: 17 pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-8: 1 +pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-8: 17 pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-9: -INFINITY -pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-0: 1 +pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-0: 15 pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-9: -INFINITY @@ -316,14 +316,14 @@ pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY -pcmk__primitive_assign: vg1:0 allocation score on hex-7: 2 +pcmk__primitive_assign: vg1:0 allocation score on hex-7: 18 pcmk__primitive_assign: vg1:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on hex-7: -INFINITY -pcmk__primitive_assign: vg1:1 allocation score on hex-8: 2 +pcmk__primitive_assign: vg1:1 allocation score on hex-8: 18 pcmk__primitive_assign: vg1:1 allocation score on hex-9: 
-INFINITY -pcmk__primitive_assign: vg1:2 allocation score on hex-0: 2 +pcmk__primitive_assign: vg1:2 allocation score on hex-0: 16 pcmk__primitive_assign: vg1:2 allocation score on hex-7: -INFINITY pcmk__primitive_assign: vg1:2 allocation score on hex-8: -INFINITY pcmk__primitive_assign: vg1:2 allocation score on hex-9: -INFINITY diff --git a/cts/scheduler/scores/bug-lf-2574.scores b/cts/scheduler/scores/bug-lf-2574.scores index 0f5cf60..77d8b87 100644 --- a/cts/scheduler/scores/bug-lf-2574.scores +++ b/cts/scheduler/scores/bug-lf-2574.scores @@ -34,16 +34,19 @@ pcmk__primitive_assign: prmDummy1:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmDummy1:0 allocation score on srv03: INFINITY pcmk__primitive_assign: prmDummy1:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmDummy1:1 allocation score on srv02: INFINITY -pcmk__primitive_assign: prmDummy1:1 allocation score on srv03: 0 +pcmk__primitive_assign: prmDummy1:1 allocation score on srv03: 200 pcmk__primitive_assign: prmDummy1:2 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmDummy1:2 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmDummy1:2 allocation score on srv03: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv02: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on srv03: INFINITY pcmk__primitive_assign: prmPingd:2 allocation score on srv01: -INFINITY pcmk__primitive_assign: prmPingd:2 allocation score on srv02: INFINITY -pcmk__primitive_assign: prmPingd:2 allocation score on srv03: 0 +pcmk__primitive_assign: prmPingd:2 allocation score on srv03: 200 diff --git a/cts/scheduler/scores/bug-lf-2581.scores b/cts/scheduler/scores/bug-lf-2581.scores index 267eb6c..29170dd 100644 --- a/cts/scheduler/scores/bug-lf-2581.scores +++ b/cts/scheduler/scores/bug-lf-2581.scores @@ -43,7 +43,7 @@ pcmk__group_assign: Z:0 allocation score on elvis: 1 pcmk__group_assign: Z:0 allocation score on queen: 0 pcmk__group_assign: Z:1 allocation score on elvis: -INFINITY pcmk__group_assign: Z:1 allocation score on queen: 0 -pcmk__primitive_assign: A:0 allocation score on elvis: 2 +pcmk__primitive_assign: A:0 allocation score on elvis: 6 pcmk__primitive_assign: A:0 allocation score on queen: 0 pcmk__primitive_assign: A:1 allocation score on elvis: -INFINITY pcmk__primitive_assign: A:1 allocation score on queen: 0 @@ -55,7 +55,7 @@ pcmk__primitive_assign: C-1 allocation score on elvis: 1 pcmk__primitive_assign: C-1 allocation score on queen: -INFINITY pcmk__primitive_assign: C-2 allocation score on elvis: 1 pcmk__primitive_assign: C-2 allocation score on queen: -INFINITY -pcmk__primitive_assign: Z:0 allocation score on elvis: 1 +pcmk__primitive_assign: Z:0 allocation score on elvis: 5 pcmk__primitive_assign: Z:0 allocation score on queen: -INFINITY pcmk__primitive_assign: Z:1 allocation score on elvis: -INFINITY pcmk__primitive_assign: Z:1 allocation score on queen: 0 diff --git a/cts/scheduler/scores/bug-lf-2619.scores b/cts/scheduler/scores/bug-lf-2619.scores index 
32f947f..6fb3857 100644 --- a/cts/scheduler/scores/bug-lf-2619.scores +++ b/cts/scheduler/scores/bug-lf-2619.scores @@ -236,7 +236,7 @@ pcmk__primitive_assign: prmPingd:1 allocation score on sby1: INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on sby2: 0 pcmk__primitive_assign: prmPingd:2 allocation score on act1: -INFINITY pcmk__primitive_assign: prmPingd:2 allocation score on act2: INFINITY -pcmk__primitive_assign: prmPingd:2 allocation score on act3: 0 +pcmk__primitive_assign: prmPingd:2 allocation score on act3: INFINITY pcmk__primitive_assign: prmPingd:2 allocation score on sby1: 0 pcmk__primitive_assign: prmPingd:2 allocation score on sby2: 0 pcmk__primitive_assign: prmPingd:3 allocation score on act1: -INFINITY diff --git a/cts/scheduler/scores/bug-n-387749.scores b/cts/scheduler/scores/bug-n-387749.scores index 5165421..bcd4706 100644 --- a/cts/scheduler/scores/bug-n-387749.scores +++ b/cts/scheduler/scores/bug-n-387749.scores @@ -20,11 +20,14 @@ pcmk__group_assign: resource_ipaddr1_single allocation score on power720-4: 0 pcmk__group_assign: resource_nfsserver_single allocation score on power720-1: 0 pcmk__group_assign: resource_nfsserver_single allocation score on power720-2: 1000 pcmk__group_assign: resource_nfsserver_single allocation score on power720-4: 0 -pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-1: 0 +pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-1: INFINITY pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-2: -INFINITY pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-4: -INFINITY -pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: 0 -pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 1000 +pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: -INFINITY +pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: INFINITY +pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 3000 +pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 3000 +pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-4: -INFINITY pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-4: -INFINITY pcmk__primitive_assign: export_home_ocfs2:2 allocation score on power720-1: -INFINITY pcmk__primitive_assign: export_home_ocfs2:2 allocation score on power720-2: -INFINITY diff --git a/cts/scheduler/scores/bug-suse-707150.scores b/cts/scheduler/scores/bug-suse-707150.scores index 7f35079..4e85c86 100644 --- a/cts/scheduler/scores/bug-suse-707150.scores +++ b/cts/scheduler/scores/bug-suse-707150.scores @@ -116,8 +116,12 @@ pcmk__clone_assign: vg1:3 allocation score on hex-7: 0 pcmk__clone_assign: vg1:3 allocation score on hex-8: 0 pcmk__clone_assign: vg1:3 allocation score on hex-9: 0 pcmk__group_assign: base-group:0 allocation score on hex-0: 0 +pcmk__group_assign: base-group:0 allocation score on hex-0: 0 +pcmk__group_assign: base-group:0 allocation score on hex-7: -INFINITY pcmk__group_assign: base-group:0 allocation score on hex-7: -INFINITY pcmk__group_assign: base-group:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: base-group:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: base-group:0 allocation score on hex-9: -INFINITY pcmk__group_assign: base-group:0 allocation score on hex-9: 0 pcmk__group_assign: base-group:1 allocation score on hex-0: -INFINITY pcmk__group_assign: base-group:1 
allocation score on hex-7: -INFINITY @@ -132,8 +136,12 @@ pcmk__group_assign: base-group:3 allocation score on hex-7: -INFINITY pcmk__group_assign: base-group:3 allocation score on hex-8: -INFINITY pcmk__group_assign: base-group:3 allocation score on hex-9: -INFINITY pcmk__group_assign: clvm:0 allocation score on hex-0: 0 +pcmk__group_assign: clvm:0 allocation score on hex-0: 0 +pcmk__group_assign: clvm:0 allocation score on hex-7: -INFINITY pcmk__group_assign: clvm:0 allocation score on hex-7: -INFINITY pcmk__group_assign: clvm:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: clvm:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: clvm:0 allocation score on hex-9: -INFINITY pcmk__group_assign: clvm:0 allocation score on hex-9: 0 pcmk__group_assign: clvm:1 allocation score on hex-0: -INFINITY pcmk__group_assign: clvm:1 allocation score on hex-7: -INFINITY @@ -148,8 +156,12 @@ pcmk__group_assign: clvm:3 allocation score on hex-7: -INFINITY pcmk__group_assign: clvm:3 allocation score on hex-8: -INFINITY pcmk__group_assign: clvm:3 allocation score on hex-9: -INFINITY pcmk__group_assign: cmirrord:0 allocation score on hex-0: 0 +pcmk__group_assign: cmirrord:0 allocation score on hex-0: 0 +pcmk__group_assign: cmirrord:0 allocation score on hex-7: -INFINITY pcmk__group_assign: cmirrord:0 allocation score on hex-7: -INFINITY pcmk__group_assign: cmirrord:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: cmirrord:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: cmirrord:0 allocation score on hex-9: -INFINITY pcmk__group_assign: cmirrord:0 allocation score on hex-9: 0 pcmk__group_assign: cmirrord:1 allocation score on hex-0: -INFINITY pcmk__group_assign: cmirrord:1 allocation score on hex-7: -INFINITY @@ -164,8 +176,12 @@ pcmk__group_assign: cmirrord:3 allocation score on hex-7: -INFINITY pcmk__group_assign: cmirrord:3 allocation score on hex-8: -INFINITY pcmk__group_assign: cmirrord:3 allocation score on hex-9: -INFINITY pcmk__group_assign: dlm:0 allocation score on hex-0: 1 +pcmk__group_assign: dlm:0 allocation score on hex-0: 1 pcmk__group_assign: dlm:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: dlm:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: dlm:0 allocation score on hex-8: -INFINITY pcmk__group_assign: dlm:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: dlm:0 allocation score on hex-9: -INFINITY pcmk__group_assign: dlm:0 allocation score on hex-9: 0 pcmk__group_assign: dlm:1 allocation score on hex-0: -INFINITY pcmk__group_assign: dlm:1 allocation score on hex-7: -INFINITY @@ -180,8 +196,12 @@ pcmk__group_assign: dlm:3 allocation score on hex-7: -INFINITY pcmk__group_assign: dlm:3 allocation score on hex-8: -INFINITY pcmk__group_assign: dlm:3 allocation score on hex-9: -INFINITY pcmk__group_assign: o2cb:0 allocation score on hex-0: 0 +pcmk__group_assign: o2cb:0 allocation score on hex-0: 0 pcmk__group_assign: o2cb:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: o2cb:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: o2cb:0 allocation score on hex-8: -INFINITY pcmk__group_assign: o2cb:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: o2cb:0 allocation score on hex-9: -INFINITY pcmk__group_assign: o2cb:0 allocation score on hex-9: 0 pcmk__group_assign: o2cb:1 allocation score on hex-0: -INFINITY pcmk__group_assign: o2cb:1 allocation score on hex-7: -INFINITY @@ -196,8 +216,12 @@ pcmk__group_assign: o2cb:3 allocation score on hex-7: -INFINITY pcmk__group_assign: o2cb:3 allocation score 
on hex-8: -INFINITY pcmk__group_assign: o2cb:3 allocation score on hex-9: -INFINITY pcmk__group_assign: ocfs2-1:0 allocation score on hex-0: 0 +pcmk__group_assign: ocfs2-1:0 allocation score on hex-0: 0 +pcmk__group_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY pcmk__group_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY pcmk__group_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY pcmk__group_assign: ocfs2-1:0 allocation score on hex-9: 0 pcmk__group_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY pcmk__group_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY @@ -212,8 +236,12 @@ pcmk__group_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY pcmk__group_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY pcmk__group_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY pcmk__group_assign: vg1:0 allocation score on hex-0: 0 +pcmk__group_assign: vg1:0 allocation score on hex-0: 0 pcmk__group_assign: vg1:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: vg1:0 allocation score on hex-7: -INFINITY +pcmk__group_assign: vg1:0 allocation score on hex-8: -INFINITY pcmk__group_assign: vg1:0 allocation score on hex-8: -INFINITY +pcmk__group_assign: vg1:0 allocation score on hex-9: -INFINITY pcmk__group_assign: vg1:0 allocation score on hex-9: 0 pcmk__group_assign: vg1:1 allocation score on hex-0: -INFINITY pcmk__group_assign: vg1:1 allocation score on hex-7: -INFINITY @@ -227,10 +255,14 @@ pcmk__group_assign: vg1:3 allocation score on hex-0: -INFINITY pcmk__group_assign: vg1:3 allocation score on hex-7: -INFINITY pcmk__group_assign: vg1:3 allocation score on hex-8: -INFINITY pcmk__group_assign: vg1:3 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: clvm:0 allocation score on hex-0: -INFINITY pcmk__primitive_assign: clvm:0 allocation score on hex-0: 0 pcmk__primitive_assign: clvm:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: clvm:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:0 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: clvm:0 allocation score on hex-9: 0 pcmk__primitive_assign: clvm:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: clvm:1 allocation score on hex-7: -INFINITY pcmk__primitive_assign: clvm:1 allocation score on hex-8: -INFINITY @@ -243,10 +275,14 @@ pcmk__primitive_assign: clvm:3 allocation score on hex-0: -INFINITY pcmk__primitive_assign: clvm:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: clvm:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: clvm:3 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: -INFINITY pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: 0 pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: 0 pcmk__primitive_assign: cmirrord:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: cmirrord:1 allocation 
score on hex-7: -INFINITY pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: -INFINITY @@ -259,9 +295,13 @@ pcmk__primitive_assign: cmirrord:3 allocation score on hex-0: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: cmirrord:3 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on hex-0: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-0: 1 pcmk__primitive_assign: dlm:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on hex-7: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-9: 0 pcmk__primitive_assign: dlm:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on hex-7: -INFINITY @@ -283,10 +323,14 @@ pcmk__primitive_assign: fencing-sbd allocation score on hex-0: 0 pcmk__primitive_assign: fencing-sbd allocation score on hex-7: 0 pcmk__primitive_assign: fencing-sbd allocation score on hex-8: 0 pcmk__primitive_assign: fencing-sbd allocation score on hex-9: 1 +pcmk__primitive_assign: o2cb:0 allocation score on hex-0: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on hex-0: 0 pcmk__primitive_assign: o2cb:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on hex-7: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on hex-9: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on hex-9: 0 pcmk__primitive_assign: o2cb:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on hex-7: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on hex-8: -INFINITY @@ -300,8 +344,12 @@ pcmk__primitive_assign: o2cb:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: o2cb:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY @@ -316,8 +364,12 @@ pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY +pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY +pcmk__primitive_assign: vg1:0 allocation score on hex-7: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-7: -INFINITY pcmk__primitive_assign: 
vg1:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: vg1:0 allocation score on hex-8: -INFINITY +pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on hex-0: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on hex-7: -INFINITY diff --git a/cts/scheduler/scores/bundle-connection-with-container.scores b/cts/scheduler/scores/bundle-connection-with-container.scores index fc20405..30d63ac 100644 --- a/cts/scheduler/scores/bundle-connection-with-container.scores +++ b/cts/scheduler/scores/bundle-connection-with-container.scores @@ -1,76 +1,76 @@ -pcmk__bundle_allocate: httpd-bundle allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-0 allocation score on remote-rhel8-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on remote-rhel8-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on remote-rhel8-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: 
httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on remote-rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-4: -INFINITY -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-5: -INFINITY -pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 501 -pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 501 -pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 501 +pcmk__bundle_assign: httpd-bundle allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-0 allocation score on remote-rhel8-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on 
rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on remote-rhel8-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on remote-rhel8-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-1: 0 +pcmk__bundle_assign: 
httpd-bundle-podman-0 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on remote-rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-4: -INFINITY +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-5: -INFINITY +pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 501 +pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 501 +pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 501 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-1: 0 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-2: 0 diff --git a/cts/scheduler/scores/bundle-interleave-promote.scores b/cts/scheduler/scores/bundle-interleave-promote.scores index 8996be1..85c5aed 100644 --- a/cts/scheduler/scores/bundle-interleave-promote.scores +++ b/cts/scheduler/scores/bundle-interleave-promote.scores @@ -1,102 +1,102 @@ app:0 promotion score on app-bundle-0: 12 app:1 promotion score on app-bundle-1: 13 -app:2 promotion score on app-bundle-2: 14 +app:2 promotion score on app-bundle-2: INFINITY base:0 promotion score on base-bundle-0: 12 base:1 promotion score on base-bundle-1: 13 base:2 promotion score on base-bundle-2: 14 -pcmk__bundle_allocate: app-bundle allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node4: 0 -pcmk__bundle_allocate: 
app-bundle-2 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-0: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-1: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-2: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node5: 0 -pcmk__bundle_allocate: app:0 allocation score on app-bundle-0: 501 -pcmk__bundle_allocate: app:1 allocation score on app-bundle-1: 501 -pcmk__bundle_allocate: app:2 allocation score on app-bundle-2: 501 -pcmk__bundle_allocate: base-bundle allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY 
-pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node5: 0 -pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501 -pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501 -pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 501 +pcmk__bundle_assign: app-bundle allocation score on node1: 0 +pcmk__bundle_assign: app-bundle allocation score on node2: 0 +pcmk__bundle_assign: app-bundle allocation score on node3: 0 +pcmk__bundle_assign: app-bundle allocation score on node4: 0 +pcmk__bundle_assign: app-bundle allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-0: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-1: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-2: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node5: 0 
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node5: 0 +pcmk__bundle_assign: app:0 allocation score on app-bundle-0: 501 +pcmk__bundle_assign: app:1 allocation score on app-bundle-1: 501 +pcmk__bundle_assign: app:2 allocation score on app-bundle-2: 501 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle allocation score on node4: 0 +pcmk__bundle_assign: base-bundle allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node5: 0 +pcmk__bundle_assign: 
base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node5: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-0: 0 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-1: 0 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-2: 0 diff --git a/cts/scheduler/scores/bundle-interleave-start.scores b/cts/scheduler/scores/bundle-interleave-start.scores index 7f4a370..52f95e8 100644 --- a/cts/scheduler/scores/bundle-interleave-start.scores +++ b/cts/scheduler/scores/bundle-interleave-start.scores @@ -1,102 +1,102 @@ -app:0 promotion score on app-bundle-0: -1 -app:1 promotion score on app-bundle-1: -1 -app:2 promotion score on app-bundle-2: -1 -base:0 promotion score on base-bundle-0: -1 -base:1 promotion score on base-bundle-1: -1 -base:2 promotion score on base-bundle-2: -1 -pcmk__bundle_allocate: app-bundle allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-0 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-1 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-2 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-0: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-1: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-2: -INFINITY -pcmk__bundle_allocate: app-bundle-clone allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-clone allocation score on node4: 0 -pcmk__bundle_allocate: 
app-bundle-clone allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node5: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node1: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node2: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node3: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node4: 0 -pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node5: 0 -pcmk__bundle_allocate: app:0 allocation score on app-bundle-0: 500 -pcmk__bundle_allocate: app:1 allocation score on app-bundle-1: 500 -pcmk__bundle_allocate: app:2 allocation score on app-bundle-2: 500 -pcmk__bundle_allocate: base-bundle allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-0 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-1 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-2 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY -pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY -pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-clone allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0 -pcmk__bundle_allocate: 
base-bundle-podman-0 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node5: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node4: 0 -pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node5: 0 -pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 500 -pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 500 -pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500 +app:0 promotion score on app-bundle-0: 12 +app:1 promotion score on app-bundle-1: 13 +app:2 promotion score on app-bundle-2: INFINITY +base:0 promotion score on base-bundle-0: 12 +base:1 promotion score on base-bundle-1: 13 +base:2 promotion score on base-bundle-2: 14 +pcmk__bundle_assign: app-bundle allocation score on node1: 0 +pcmk__bundle_assign: app-bundle allocation score on node2: 0 +pcmk__bundle_assign: app-bundle allocation score on node3: 0 +pcmk__bundle_assign: app-bundle allocation score on node4: 0 +pcmk__bundle_assign: app-bundle allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-0 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-1 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-2 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-0: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-1: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-2: -INFINITY +pcmk__bundle_assign: app-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-clone allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-0 allocation score on 
node5: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-1 allocation score on node5: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node4: 0 +pcmk__bundle_assign: app-bundle-podman-2 allocation score on node5: 0 +pcmk__bundle_assign: app:0 allocation score on app-bundle-0: 500 +pcmk__bundle_assign: app:1 allocation score on app-bundle-1: 500 +pcmk__bundle_assign: app:2 allocation score on app-bundle-2: 500 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle allocation score on node4: 0 +pcmk__bundle_assign: base-bundle allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node5: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node5: 0 +pcmk__bundle_assign: 
base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node4: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node5: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 500 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 500 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-0: 0 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-1: 0 pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-2: 0 diff --git a/cts/scheduler/scores/bundle-nested-colocation.scores b/cts/scheduler/scores/bundle-nested-colocation.scores index b83b212..9baa073 100644 --- a/cts/scheduler/scores/bundle-nested-colocation.scores +++ b/cts/scheduler/scores/bundle-nested-colocation.scores @@ -1,118 +1,118 @@ -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-0: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on 
overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-0: 1 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-1: 1 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-2: 1 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-2: 0 
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-1: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-2: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-0: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-2: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation 
score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-0: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-1: 1 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500 -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 500 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-0: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: 
galera-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-0: 1 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-1: 1 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-2: 1 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on 
overcloud-galera-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-1: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-2: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-0: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-2: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation 
score on overcloud-galera-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-0: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-1: 1 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-2: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500 +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 500 pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: -INFINITY pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: -INFINITY pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: -INFINITY diff --git a/cts/scheduler/scores/bundle-order-fencing.scores b/cts/scheduler/scores/bundle-order-fencing.scores index a3dee02..54db322 100644 --- a/cts/scheduler/scores/bundle-order-fencing.scores +++ b/cts/scheduler/scores/bundle-order-fencing.scores @@ -2,129 +2,129 @@ galera:0 promotion score on galera-bundle-0: -1 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: 
-INFINITY -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: 
rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: 
haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY 
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY 
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/scores/bundle-order-partial-start-2.scores b/cts/scheduler/scores/bundle-order-partial-start-2.scores index bb77c77..3d3eb82 100644 --- a/cts/scheduler/scores/bundle-order-partial-start-2.scores +++ b/cts/scheduler/scores/bundle-order-partial-start-2.scores @@ -1,30 +1,30 @@ galera:0 promotion score on galera-bundle-0: -1 -pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 -pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 +pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY @@ -37,7 +37,7 @@ pcmk__clone_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__primitive_assign: galera-bundle-0 allocation score on undercloud: INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.247 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.248 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.249 allocation score on undercloud: INFINITY diff --git a/cts/scheduler/scores/bundle-order-partial-start.scores b/cts/scheduler/scores/bundle-order-partial-start.scores index d765883..7e76f44 100644 --- a/cts/scheduler/scores/bundle-order-partial-start.scores +++ b/cts/scheduler/scores/bundle-order-partial-start.scores @@ -1,30 +1,30 @@ galera:0 promotion score on galera-bundle-0: -1 -pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 
allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 -pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 +pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY 
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY @@ -37,7 +37,7 @@ pcmk__clone_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__primitive_assign: galera-bundle-0 allocation score on undercloud: 10000 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on undercloud: 0 pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.247 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.248 allocation score on undercloud: INFINITY pcmk__primitive_assign: ip-192.168.122.249 allocation score on undercloud: INFINITY diff --git a/cts/scheduler/scores/bundle-order-partial-stop.scores b/cts/scheduler/scores/bundle-order-partial-stop.scores index e00df39..2bb6cb6 100644 --- a/cts/scheduler/scores/bundle-order-partial-stop.scores +++ b/cts/scheduler/scores/bundle-order-partial-stop.scores @@ -1,30 +1,30 @@ galera:0 promotion score on galera-bundle-0: 100 -pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on 
galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY diff --git a/cts/scheduler/scores/bundle-order-startup-clone-2.scores b/cts/scheduler/scores/bundle-order-startup-clone-2.scores index d44e358..f4e5353 100644 --- a/cts/scheduler/scores/bundle-order-startup-clone-2.scores +++ b/cts/scheduler/scores/bundle-order-startup-clone-2.scores @@ -2,98 +2,98 @@ galera:0 promotion score on galera-bundle-0: -1 galera:1 promotion score on galera-bundle-1: -1 galera:2 promotion score on galera-bundle-2: -1 -pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-3: 0 -pcmk__bundle_allocate: 
galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500 -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on 
metal-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 500 +pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on metal-2: 0 
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500 +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score 
on metal-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 500 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0 diff --git a/cts/scheduler/scores/bundle-order-startup-clone.scores b/cts/scheduler/scores/bundle-order-startup-clone.scores index f749b33..e64246b 100644 --- a/cts/scheduler/scores/bundle-order-startup-clone.scores +++ b/cts/scheduler/scores/bundle-order-startup-clone.scores @@ -1,49 +1,49 @@ galera:0 promotion score on galera-bundle-0: -1 -pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score 
on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500 +pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation 
score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on metal-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on metal-2: -INFINITY diff --git a/cts/scheduler/scores/bundle-order-startup.scores b/cts/scheduler/scores/bundle-order-startup.scores index 9b32784..92fce09 100644 --- a/cts/scheduler/scores/bundle-order-startup.scores +++ b/cts/scheduler/scores/bundle-order-startup.scores @@ -1,30 +1,30 @@ galera:0 promotion score on galera-bundle-0: -1 -pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: 
-INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 -pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500 +pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 +pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation 
score on undercloud: 0 +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY diff --git a/cts/scheduler/scores/bundle-order-stop-clone.scores b/cts/scheduler/scores/bundle-order-stop-clone.scores index 707260b..59419eb 100644 --- a/cts/scheduler/scores/bundle-order-stop-clone.scores +++ b/cts/scheduler/scores/bundle-order-stop-clone.scores @@ -2,98 +2,98 @@ galera:0 promotion score on galera-bundle-0: -1 galera:1 promotion score on galera-bundle-1: -1 galera:2 promotion score on galera-bundle-2: -1 -pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-2: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-3: INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-3: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: galera-bundle-master allocation 
score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-2: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-3: INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 
allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-3: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-2: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on metal-3: INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-3: INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0 
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-2: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on metal-3: INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY 
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-3: INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0 @@ -147,8 +147,12 @@ pcmk__primitive_assign: galera-bundle-2 allocation score on metal-2: 0 pcmk__primitive_assign: galera-bundle-2 allocation score on metal-3: INFINITY pcmk__primitive_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY +pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-1: -INFINITY pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY diff --git a/cts/scheduler/scores/bundle-order-stop-on-remote.scores b/cts/scheduler/scores/bundle-order-stop-on-remote.scores index 4f592d1..7d92b2c 100644 --- a/cts/scheduler/scores/bundle-order-stop-on-remote.scores +++ b/cts/scheduler/scores/bundle-order-stop-on-remote.scores @@ -2,312 +2,312 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on 
messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-0: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-1: INFINITY -pcmk__bundle_allocate: 
galera-bundle-docker-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY 
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on 
messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation 
score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: 
redis-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: 
redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-0: INFINITY 
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY 
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY 
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 
allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 
allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-master 
allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/scores/bundle-order-stop.scores b/cts/scheduler/scores/bundle-order-stop.scores index e00df39..2bb6cb6 100644 --- a/cts/scheduler/scores/bundle-order-stop.scores +++ b/cts/scheduler/scores/bundle-order-stop.scores @@ -1,30 +1,30 @@ galera:0 promotion score on galera-bundle-0: 100 -pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0 -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY 
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0 +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY diff --git a/cts/scheduler/scores/bundle-probe-order-1.scores b/cts/scheduler/scores/bundle-probe-order-1.scores index 0716be6..edaaaa3 100644 --- a/cts/scheduler/scores/bundle-probe-order-1.scores +++ b/cts/scheduler/scores/bundle-probe-order-1.scores @@ -2,36 +2,36 @@ galera:0 promotion score on none: 0 galera:1 promotion score on none: 0 galera:2 promotion score on none: 0 -pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: 0 -pcmk__bundle_allocate: 
galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
 pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
 pcmk__clone_assign: galera-bundle-master allocation score on centos2: -INFINITY
 pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-order-2.scores b/cts/scheduler/scores/bundle-probe-order-2.scores
index ed8f93f..2de3bbc 100644
--- a/cts/scheduler/scores/bundle-probe-order-2.scores
+++ b/cts/scheduler/scores/bundle-probe-order-2.scores
@@ -2,36 +2,36 @@
 galera:0 promotion score on none: 0
 galera:1 promotion score on none: 0
 galera:2 promotion score on none: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
 pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
 pcmk__clone_assign: galera-bundle-master allocation score on centos2: -INFINITY
 pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-order-3.scores b/cts/scheduler/scores/bundle-probe-order-3.scores
index 3343ae3..e63fa84 100644
--- a/cts/scheduler/scores/bundle-probe-order-3.scores
+++ b/cts/scheduler/scores/bundle-probe-order-3.scores
@@ -2,36 +2,36 @@
 galera:0 promotion score on none: 0
 galera:1 promotion score on none: 0
 galera:2 promotion score on none: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
 pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
pcmk__clone_assign:
galera-bundle-master allocation score on centos2: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY diff --git a/cts/scheduler/scores/bundle-probe-remotes.scores b/cts/scheduler/scores/bundle-probe-remotes.scores index 3361749..13180af 100644 --- a/cts/scheduler/scores/bundle-probe-remotes.scores +++ b/cts/scheduler/scores/bundle-probe-remotes.scores @@ -1,100 +1,100 @@ -pcmk__bundle_allocate: dummy1:0 allocation score on scale1-bundle-0: 500 -pcmk__bundle_allocate: dummy1:1 allocation score on scale1-bundle-1: 500 -pcmk__bundle_allocate: dummy1:2 allocation score on scale1-bundle-2: 500 -pcmk__bundle_allocate: dummy1:3 allocation score on scale1-bundle-3: 500 -pcmk__bundle_allocate: dummy1:4 allocation score on scale1-bundle-4: 500 -pcmk__bundle_allocate: dummy1:5 allocation score on scale1-bundle-5: 500 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h08-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h08-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h08-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h08-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h08-r630: -INFINITY 
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h08-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h09-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h10-r630: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-0: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-1: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-2: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-3: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-4: -INFINITY -pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-5: -INFINITY -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 
allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h10-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h05-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h06-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h07-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h08-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h09-r630: 0 -pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h10-r630: 0 +pcmk__bundle_assign: dummy1:0 allocation score on scale1-bundle-0: 500 +pcmk__bundle_assign: dummy1:1 allocation score on scale1-bundle-1: 500 +pcmk__bundle_assign: dummy1:2 allocation score on scale1-bundle-2: 500 +pcmk__bundle_assign: dummy1:3 allocation score on scale1-bundle-3: 500 +pcmk__bundle_assign: dummy1:4 allocation score on scale1-bundle-4: 500 +pcmk__bundle_assign: dummy1:5 allocation score on scale1-bundle-5: 500 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h08-r630: 0 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h09-r630: 0 +pcmk__bundle_assign: scale1-bundle allocation score on c09-h10-r630: 0 +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: 
scale1-bundle-3 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h08-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h09-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h10-r630: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h08-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h09-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h10-r630: 0 +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-0: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-1: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-2: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-3: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-4: -INFINITY +pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-5: -INFINITY +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h08-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h09-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h10-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h08-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h09-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h10-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h05-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h06-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h07-r630: 0 +pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h08-r630: 0 +pcmk__bundle_assign: 
scale1-bundle-docker-2 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h10-r630: 0
 pcmk__clone_assign: dummy1:0 allocation score on scale1-bundle-0: INFINITY
 pcmk__clone_assign: dummy1:1 allocation score on scale1-bundle-1: INFINITY
 pcmk__clone_assign: dummy1:2 allocation score on scale1-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores
new file mode 100644
index 0000000..cd53588
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores
new file mode 100644
index 0000000..9930eeb
--- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 11 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: 13 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: 
base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: -5000 diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores new file mode 100644 index 0000000..63bea1c --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 11 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: 
base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: 0 diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores new file mode 100644 index 0000000..6e7cdd7 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 11 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: -4987 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 
allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation 
score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: 0 diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores new file mode 100644 index 0000000..b36fa42 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores @@ -0,0 +1,160 @@ + +bundle-a-rsc:0 promotion score on bundle-a-0: 11 +bundle-a-rsc:1 promotion score on bundle-a-1: -INFINITY +bundle-a-rsc:2 promotion score on bundle-a-2: 12 +bundle-b-rsc:0 promotion score on bundle-b-0: 12 +bundle-b-rsc:1 promotion score on bundle-b-1: 14 +bundle-b-rsc:2 promotion score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-a allocation score on node1: 0 +pcmk__bundle_assign: bundle-a allocation score on node2: 0 +pcmk__bundle_assign: bundle-a allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501 +pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501 +pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501 +pcmk__bundle_assign: bundle-b allocation score on node1: 
0 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 14 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation 
score on node3: 0 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501 +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501 +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0 +pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0 +pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__primitive_assign: bundle-b-0 allocation score on node1: 
10000 +pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores new file mode 100644 index 0000000..779495e --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores @@ -0,0 +1,160 @@ + +bundle-a-rsc:0 promotion score on bundle-a-0: 11 +bundle-a-rsc:1 promotion score on bundle-a-1: -4987 +bundle-a-rsc:2 promotion score on bundle-a-2: 12 +bundle-b-rsc:0 promotion score on bundle-b-0: 12 +bundle-b-rsc:1 promotion score on bundle-b-1: 14 +bundle-b-rsc:2 promotion score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-a allocation score on node1: 0 +pcmk__bundle_assign: bundle-a allocation score on node2: 0 +pcmk__bundle_assign: bundle-a allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0 
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501 +pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501 +pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 14 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501 +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501 +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0 +pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0 +pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on 
node3: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-colocation-1.scores b/cts/scheduler/scores/bundle-promoted-colocation-1.scores new file mode 100644 index 0000000..36f2bc5 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-1.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 11 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: 13 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score 
on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: -INFINITY +pcmk__primitive_assign: vip allocation score on node2: -INFINITY +pcmk__primitive_assign: vip allocation score on node3: 0 diff --git 
a/cts/scheduler/scores/bundle-promoted-colocation-2.scores b/cts/scheduler/scores/bundle-promoted-colocation-2.scores new file mode 100644 index 0000000..384fbbb --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-2.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 11 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: 13 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: 5000 diff --git a/cts/scheduler/scores/bundle-promoted-colocation-3.scores b/cts/scheduler/scores/bundle-promoted-colocation-3.scores new file mode 100644 index 0000000..1792152 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-3.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: INFINITY +base:1 promotion score on base-bundle-1: -INFINITY +base:2 promotion score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: 0 diff --git a/cts/scheduler/scores/bundle-promoted-colocation-4.scores b/cts/scheduler/scores/bundle-promoted-colocation-4.scores new file mode 100644 index 0000000..3cb1ed9 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-4.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 5011 +base:1 promotion score on base-bundle-1: 12 +base:2 promotion score on base-bundle-2: 13 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 
+pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-podman-0 
allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: vip allocation score on node1: 0 +pcmk__primitive_assign: vip allocation score on node2: 0 +pcmk__primitive_assign: vip allocation score on node3: 0 diff --git a/cts/scheduler/scores/bundle-promoted-colocation-5.scores b/cts/scheduler/scores/bundle-promoted-colocation-5.scores new file mode 100644 index 0000000..a5bbab9 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-5.scores @@ -0,0 +1,160 @@ + +bundle-a-rsc:0 promotion score on bundle-a-0: 11 +bundle-a-rsc:1 promotion score on bundle-a-1: 13 +bundle-a-rsc:2 promotion score on bundle-a-2: INFINITY +bundle-b-rsc:0 promotion score on bundle-b-0: 12 +bundle-b-rsc:1 promotion score on bundle-b-1: 11 +bundle-b-rsc:2 promotion score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-a allocation score on node1: 0 +pcmk__bundle_assign: bundle-a allocation score on node2: 0 +pcmk__bundle_assign: bundle-a allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501 +pcmk__bundle_assign: bundle-a-rsc:1 allocation score on 
bundle-a-1: 501 +pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 11 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation 
score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501 +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501 +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0 +pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0 +pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY 
+pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-colocation-6.scores b/cts/scheduler/scores/bundle-promoted-colocation-6.scores new file mode 100644 index 0000000..f31a870 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-colocation-6.scores @@ -0,0 +1,160 @@ + +bundle-a-rsc:0 promotion score on bundle-a-0: 11 +bundle-a-rsc:1 promotion score on bundle-a-1: 13 +bundle-a-rsc:2 promotion score on bundle-a-2: 5012 +bundle-b-rsc:0 promotion score on bundle-b-0: 12 +bundle-b-rsc:1 promotion score on bundle-b-1: 11 +bundle-b-rsc:2 promotion score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-a allocation score on node1: 0 +pcmk__bundle_assign: bundle-a allocation score on node2: 0 +pcmk__bundle_assign: bundle-a allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY +pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0 
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501 +pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501 +pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node1: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node2: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 11 +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13 +pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0 
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501 +pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501 +pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501 +pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0 +pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0 +pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0 +pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0 +pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY +pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY +pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: 
-INFINITY +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY +pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY +pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY +pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000 +pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000 +pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0 +pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY +pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY +pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-1.scores b/cts/scheduler/scores/bundle-promoted-location-1.scores new file mode 100644 index 0000000..6bf9a23 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-1.scores @@ -0,0 +1,70 @@ + +base:0 promotion score on base-bundle-0: 10 +base:1 promotion score on base-bundle-1: 5 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 5000 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on 
base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 5000 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 5000 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 5000 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 5000 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 5000 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 
allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-2.scores b/cts/scheduler/scores/bundle-promoted-location-2.scores new file mode 100644 index 0000000..468a131 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-2.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: -1 +base:1 promotion score on base-bundle-1: 5 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: -INFINITY +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: -INFINITY +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation 
score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-3.scores b/cts/scheduler/scores/bundle-promoted-location-3.scores new file mode 100644 index 0000000..fa937e9 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-3.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 10 +base:1 promotion score on base-bundle-1: 5 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 
allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-4.scores b/cts/scheduler/scores/bundle-promoted-location-4.scores new file mode 100644 index 0000000..fa937e9 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-4.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 10 +base:1 promotion score on base-bundle-1: 5 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 
allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation 
score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-5.scores b/cts/scheduler/scores/bundle-promoted-location-5.scores new file mode 100644 index 0000000..eccb072 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-5.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 10 +base:1 promotion score on base-bundle-1: 5 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 5000 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 5000 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 5000 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 5000 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: 
base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 5000 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-promoted-location-6.scores b/cts/scheduler/scores/bundle-promoted-location-6.scores new file mode 100644 index 0000000..0eb1d51 --- /dev/null +++ b/cts/scheduler/scores/bundle-promoted-location-6.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 10 +base:1 promotion score on base-bundle-1: -1 +base:2 promotion score on base-bundle-2: 5 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: -INFINITY +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: 
base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: -INFINITY +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: -INFINITY +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/bundle-replicas-change.scores 
b/cts/scheduler/scores/bundle-replicas-change.scores index ade2466..01b1d5b 100644 --- a/cts/scheduler/scores/bundle-replicas-change.scores +++ b/cts/scheduler/scores/bundle-replicas-change.scores @@ -1,21 +1,21 @@ -pcmk__bundle_allocate: httpd-bundle allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rh74-test: INFINITY -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on rh74-test: INFINITY -pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.188 allocation score on rh74-test: INFINITY -pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.189 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.190 allocation score on rh74-test: 0 -pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500 -pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500 -pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500 +pcmk__bundle_assign: httpd-bundle allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rh74-test: INFINITY +pcmk__bundle_assign: httpd-bundle-1 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on rh74-test: INFINITY +pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.20.188 allocation score on rh74-test: INFINITY +pcmk__bundle_assign: httpd-bundle-ip-192.168.20.189 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.20.190 allocation score on rh74-test: 0 +pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 500 +pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500 +pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-1: 0 pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-2: 0 diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores index 0dfd78c..e79d28c 100644 --- a/cts/scheduler/scores/cancel-behind-moving-remote.scores +++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores @@ -2,495 +2,495 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 
promotion score on galera-bundle-2: 100 -ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1 -ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 +ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5 +ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 1 ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 -pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0 
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation 
score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on 
database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 
allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle 
allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on 
database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 500 -pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 -pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: 
rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500 -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on 
database-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on 
controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__bundle_assign: galera-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: 
galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: 
galera-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501 +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501 +pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY 
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY 
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 
allocation score on compute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-1: 
-INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY 
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 500 +pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 +pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 +pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: 
rabbitmq-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: 
-INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500 +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 +pcmk__bundle_assign: redis-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: 
redis-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY 
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-0: 0 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-1: 0 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY @@ -1799,8 +1799,8 @@ pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-1: -INFINI pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: 0 -pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: -INFINITY +pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: -INFINITY +pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: 0 pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-2: -INFINITY pcmk__primitive_assign: ip-172.17.1.87 allocation score on database-0: -INFINITY pcmk__primitive_assign: ip-172.17.1.87 allocation score on database-1: -INFINITY @@ -1865,9 +1865,9 @@ pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on mes pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: 
ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 10000 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0 -pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 10000 +pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-2: 0 @@ -1876,9 +1876,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 10000 +pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0 -pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 10000 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-2: 0 @@ -1898,9 +1898,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY @@ -1909,24 +1909,35 @@ pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__primitive_assign: 
ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY pcmk__primitive_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY diff --git a/cts/scheduler/scores/clone-anon-failcount.scores b/cts/scheduler/scores/clone-anon-failcount.scores index a01e0f3..435546d 100644 --- a/cts/scheduler/scores/clone-anon-failcount.scores +++ b/cts/scheduler/scores/clone-anon-failcount.scores @@ -224,69 +224,85 @@ pcmk__primitive_assign: UmVIPcheck allocation score on srv02: -INFINITY pcmk__primitive_assign: UmVIPcheck allocation score on srv03: -INFINITY pcmk__primitive_assign: UmVIPcheck allocation score on srv04: 100 pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 101 +pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy01:1 
allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 101 pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 100 -pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: 100 -pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: 0 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: -2999 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: -2999 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 104 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: -1894 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: 106 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -2000 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -2999 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -2000 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -2999 +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: -INFINITY +pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: 4 pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 101 +pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 101 pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 100 -pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: 100 -pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: 0 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: -2999 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: -2999 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 105 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: -1893 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: 107 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: -2000 +pcmk__primitive_assign: clnG3dummy02:3 allocation score 
on srv02: -2999 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -2000 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -2999 +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: -INFINITY +pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: 5 pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 101 +pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 101 pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 100 -pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: 100 -pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: 0 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: -2999 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: -2999 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 104 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: -1895 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: 105 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -2000 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -2999 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -2000 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -2999 +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: -INFINITY +pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: 4 pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 101 +pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 101 pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 100 
-pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: 100 -pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: 0 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: -2999 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: -2999 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 106 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: -1892 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: 108 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -2000 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -2999 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -2000 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -2999 +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: -INFINITY +pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: 6 pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv02: -INFINITY pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv03: -INFINITY diff --git a/cts/scheduler/scores/clone-fail-block-colocation.scores b/cts/scheduler/scores/clone-fail-block-colocation.scores index 1925eeb..c4cee8c 100644 --- a/cts/scheduler/scores/clone-fail-block-colocation.scores +++ b/cts/scheduler/scores/clone-fail-block-colocation.scores @@ -37,7 +37,9 @@ pcmk__primitive_assign: d_bird:1 allocation score on DEM-1: -INFINITY pcmk__primitive_assign: d_bird:1 allocation score on DEM-2: 1 pcmk__primitive_assign: d_bird_subnet_state allocation score on DEM-1: -INFINITY pcmk__primitive_assign: d_bird_subnet_state allocation score on DEM-2: 0 +pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-1: -INFINITY pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-1: 1 +pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-2: -INFINITY pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-2: 0 pcmk__primitive_assign: d_tomcat_nms:1 allocation score on DEM-1: -INFINITY pcmk__primitive_assign: d_tomcat_nms:1 allocation score on DEM-2: 1 diff --git a/cts/scheduler/scores/clone-max-zero.scores b/cts/scheduler/scores/clone-max-zero.scores index f1711b7..bd116a2 100644 --- a/cts/scheduler/scores/clone-max-zero.scores +++ b/cts/scheduler/scores/clone-max-zero.scores @@ -26,10 +26,18 @@ pcmk__primitive_assign: drbd0:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: fencing allocation score on c001n11: 0 pcmk__primitive_assign: fencing allocation score on c001n12: 0 pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY 
pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY +pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-1.scores b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores new file mode 100644 index 0000000..c1d60b2 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores @@ -0,0 +1,25 @@ + +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 0 +pcmk__clone_assign: dummy:0 allocation score on node2: 1 +pcmk__clone_assign: dummy:0 allocation score on node3: 0 +pcmk__clone_assign: dummy:1 allocation score on node1: 0 +pcmk__clone_assign: dummy:1 allocation score on node2: 0 +pcmk__clone_assign: dummy:1 allocation score on node3: 1 +pcmk__clone_assign: dummy:2 allocation score on node1: 0 +pcmk__clone_assign: dummy:2 allocation score on node2: 0 +pcmk__clone_assign: dummy:2 allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: 0 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:1 allocation score on node1: 0 +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:2 allocation score on node1: 0 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores new file mode 100644 index 0000000..4f4c29e --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores @@ -0,0 +1,31 @@ + +dummy:0 promotion score on node3: 5 +dummy:1 promotion score on node2: 15 +dummy:2 promotion score on node1: 10 +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 10 +pcmk__clone_assign: dummy:0 allocation score on node2: 0 +pcmk__clone_assign: dummy:0 allocation score on node3: 6 +pcmk__clone_assign: dummy:1 allocation score on node1: 10 +pcmk__clone_assign: dummy:1 allocation score on node2: 16 +pcmk__clone_assign: dummy:1 allocation score on node3: 0 +pcmk__clone_assign: dummy:2 allocation score on node1: 10 +pcmk__clone_assign: dummy:2 allocation score on node2: 15 +pcmk__clone_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node1: 10 +pcmk__primitive_assign: dummy:0 
allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:1 allocation score on node1: 10 +pcmk__primitive_assign: dummy:1 allocation score on node2: 16 +pcmk__primitive_assign: dummy:1 allocation score on node3: 0 +pcmk__primitive_assign: dummy:2 allocation score on node1: 10 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-11.scores b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores new file mode 100644 index 0000000..1216dba --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores @@ -0,0 +1,82 @@ + +grp:0 promotion score on node3: 10 +grp:1 promotion score on node2: 30 +grp:2 promotion score on node1: 20 +pcmk__clone_assign: grp-clone allocation score on node1: 0 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 20 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 10 +pcmk__clone_assign: grp:1 allocation score on node1: 20 +pcmk__clone_assign: grp:1 allocation score on node2: 30 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 20 +pcmk__clone_assign: grp:2 allocation score on node2: 30 +pcmk__clone_assign: grp:2 allocation score on node3: 10 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 0 +pcmk__clone_assign: rsc1:0 allocation score on node3: 1 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 1 +pcmk__clone_assign: rsc1:1 allocation score on node3: 0 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 0 +pcmk__clone_assign: rsc2:0 allocation score on node3: 1 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 1 +pcmk__clone_assign: rsc2:1 allocation score on node3: 0 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 20 +pcmk__group_assign: grp:0 allocation score on node2: -INFINITY +pcmk__group_assign: grp:0 allocation score on node3: 10 +pcmk__group_assign: grp:1 allocation score on node1: 20 +pcmk__group_assign: grp:1 allocation score on node2: 30 +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 20 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node3: 1 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: 1 
+pcmk__group_assign: rsc1:1 allocation score on node3: 0 +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node3: 1 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 1 +pcmk__group_assign: rsc2:1 allocation score on node3: 0 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-12.scores b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores new file mode 100644 index 0000000..fb96134 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 5 +base:1 promotion score on base-bundle-1: 15 +base:2 promotion score on base-bundle-2: 10 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on 
base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git 
a/cts/scheduler/scores/clone-recover-no-shuffle-2.scores b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores new file mode 100644 index 0000000..cfbd5bf --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores @@ -0,0 +1,79 @@ + +pcmk__clone_assign: grp-clone allocation score on node1: 0 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 0 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 0 +pcmk__clone_assign: grp:1 allocation score on node1: 0 +pcmk__clone_assign: grp:1 allocation score on node2: 0 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 0 +pcmk__clone_assign: grp:2 allocation score on node2: 0 +pcmk__clone_assign: grp:2 allocation score on node3: 0 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 1 +pcmk__clone_assign: rsc1:0 allocation score on node3: 0 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 0 +pcmk__clone_assign: rsc1:1 allocation score on node3: 1 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 1 +pcmk__clone_assign: rsc2:0 allocation score on node3: 0 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 0 +pcmk__clone_assign: rsc2:1 allocation score on node3: 1 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 0 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on node1: 0 +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 0 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node3: 1 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: 
-INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-3.scores b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores new file mode 100644 index 0000000..91fe06c --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores @@ -0,0 +1,64 @@ + +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation 
score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores new file mode 100644 index 0000000..2a52c81 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores @@ -0,0 +1,31 @@ + +pcmk__clone_assign: dummy-clone allocation score on node1: 100 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 100 +pcmk__clone_assign: dummy:0 allocation score on node2: 1 +pcmk__clone_assign: dummy:0 allocation score on node3: 0 +pcmk__clone_assign: dummy:1 allocation score on node1: 100 +pcmk__clone_assign: dummy:1 allocation score on node2: 0 +pcmk__clone_assign: dummy:1 allocation score on node3: 1 +pcmk__clone_assign: dummy:2 allocation score on node1: 100 +pcmk__clone_assign: dummy:2 allocation score on node2: 0 +pcmk__clone_assign: dummy:2 allocation score on node3: 0 
+pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node1: 100 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node2: 1 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node3: 0 +pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node1: 100 +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:1 allocation score on node3: 1 +pcmk__primitive_assign: dummy:2 allocation score on node1: 100 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores new file mode 100644 index 0000000..c6c8072 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores @@ -0,0 +1,109 @@ + +pcmk__clone_assign: grp-clone allocation score on node1: 100 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 100 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 0 +pcmk__clone_assign: grp:1 allocation score on node1: 100 +pcmk__clone_assign: grp:1 allocation score on node2: 0 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 100 +pcmk__clone_assign: grp:2 allocation score on node2: 0 +pcmk__clone_assign: grp:2 allocation score on node3: 0 +pcmk__clone_assign: rsc1:0 allocation score on node1: 100 +pcmk__clone_assign: rsc1:0 allocation score on node2: 1 +pcmk__clone_assign: rsc1:0 allocation score on node3: 0 +pcmk__clone_assign: rsc1:1 allocation score on node1: 100 +pcmk__clone_assign: rsc1:1 allocation score on node2: 0 +pcmk__clone_assign: rsc1:1 allocation score on node3: 1 +pcmk__clone_assign: rsc1:2 allocation score on node1: 100 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 1 +pcmk__clone_assign: rsc2:0 allocation score on node3: 0 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 0 +pcmk__clone_assign: rsc2:1 allocation score on node3: 1 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: -INFINITY +pcmk__group_assign: grp:0 allocation score on node1: 100 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node2: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on 
node1: -INFINITY +pcmk__group_assign: grp:1 allocation score on node1: 100 +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY +pcmk__group_assign: grp:1 allocation score on node2: -INFINITY +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:1 allocation score on node3: 0 +pcmk__group_assign: grp:2 allocation score on node1: 100 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 100 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node2: 1 +pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:0 allocation score on node3: 0 +pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__group_assign: rsc1:1 allocation score on node1: 100 +pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:1 allocation score on node3: 1 +pcmk__group_assign: rsc1:2 allocation score on node1: 100 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node2: 1 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:0 allocation score on node3: 0 +pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:1 allocation score on node3: 1 +pcmk__group_assign: rsc2:1 allocation score on node3: 1 +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 100 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY 
+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores new file mode 100644 index 0000000..a7231a7 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores @@ -0,0 +1,70 @@ + +pcmk__bundle_assign: base-bundle allocation score on node1: 100 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 100 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 100 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 100 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone 
allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 100 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores new file mode 100644 index 0000000..503cbb3 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores @@ -0,0 +1,34 @@ + +dummy:0 promotion score on node3: 5 +dummy:1 promotion score on node2: 10 +dummy:2 promotion score on node1: 15 +pcmk__clone_assign: dummy-clone allocation score on node1: 0 +pcmk__clone_assign: dummy-clone allocation score on node2: 0 +pcmk__clone_assign: dummy-clone allocation score on node3: 0 +pcmk__clone_assign: dummy:0 allocation score on node1: 15 +pcmk__clone_assign: dummy:0 allocation score on node2: 0 +pcmk__clone_assign: dummy:0 allocation score on node3: 6 +pcmk__clone_assign: dummy:1 allocation score on node1: 15 +pcmk__clone_assign: dummy:1 allocation score on node2: 11 +pcmk__clone_assign: dummy:1 allocation score on node3: 0 
+pcmk__clone_assign: dummy:2 allocation score on node1: 15 +pcmk__clone_assign: dummy:2 allocation score on node2: 10 +pcmk__clone_assign: dummy:2 allocation score on node3: 5 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:0 allocation score on node1: 15 +pcmk__primitive_assign: dummy:0 allocation score on node2: 0 +pcmk__primitive_assign: dummy:0 allocation score on node2: 0 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:0 allocation score on node3: 6 +pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node1: 15 +pcmk__primitive_assign: dummy:1 allocation score on node2: 11 +pcmk__primitive_assign: dummy:1 allocation score on node2: 11 +pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node1: 15 +pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-8.scores b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores new file mode 100644 index 0000000..56d4cc8 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores @@ -0,0 +1,82 @@ + +grp:0 promotion score on node3: 10 +grp:1 promotion score on node2: 20 +grp:2 promotion score on node1: 30 +pcmk__clone_assign: grp-clone allocation score on node1: 0 +pcmk__clone_assign: grp-clone allocation score on node2: 0 +pcmk__clone_assign: grp-clone allocation score on node3: 0 +pcmk__clone_assign: grp:0 allocation score on node1: 30 +pcmk__clone_assign: grp:0 allocation score on node2: 0 +pcmk__clone_assign: grp:0 allocation score on node3: 10 +pcmk__clone_assign: grp:1 allocation score on node1: 30 +pcmk__clone_assign: grp:1 allocation score on node2: 20 +pcmk__clone_assign: grp:1 allocation score on node3: 0 +pcmk__clone_assign: grp:2 allocation score on node1: 30 +pcmk__clone_assign: grp:2 allocation score on node2: 20 +pcmk__clone_assign: grp:2 allocation score on node3: 10 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 0 +pcmk__clone_assign: rsc1:0 allocation score on node3: 1 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 1 +pcmk__clone_assign: rsc1:1 allocation score on node3: 0 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 +pcmk__clone_assign: rsc2:0 allocation score on node1: 0 +pcmk__clone_assign: rsc2:0 allocation score on node2: 0 +pcmk__clone_assign: rsc2:0 allocation score on node3: 1 +pcmk__clone_assign: rsc2:1 allocation score on node1: 0 +pcmk__clone_assign: rsc2:1 allocation score on node2: 1 +pcmk__clone_assign: rsc2:1 allocation score on node3: 0 +pcmk__clone_assign: rsc2:2 allocation score on node1: 0 +pcmk__clone_assign: rsc2:2 allocation score on node2: 0 +pcmk__clone_assign: rsc2:2 allocation score on node3: 0 +pcmk__group_assign: grp:0 allocation score on node1: 30 +pcmk__group_assign: grp:0 allocation score on node2: 0 
+pcmk__group_assign: grp:0 allocation score on node3: 10 +pcmk__group_assign: grp:1 allocation score on node1: 30 +pcmk__group_assign: grp:1 allocation score on node2: 20 +pcmk__group_assign: grp:1 allocation score on node3: -INFINITY +pcmk__group_assign: grp:2 allocation score on node1: 30 +pcmk__group_assign: grp:2 allocation score on node2: -INFINITY +pcmk__group_assign: grp:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:0 allocation score on node1: 0 +pcmk__group_assign: rsc1:0 allocation score on node2: 0 +pcmk__group_assign: rsc1:0 allocation score on node3: 1 +pcmk__group_assign: rsc1:1 allocation score on node1: 0 +pcmk__group_assign: rsc1:1 allocation score on node2: 1 +pcmk__group_assign: rsc1:1 allocation score on node3: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node1: 0 +pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:0 allocation score on node1: 0 +pcmk__group_assign: rsc2:0 allocation score on node2: 0 +pcmk__group_assign: rsc2:0 allocation score on node3: 1 +pcmk__group_assign: rsc2:1 allocation score on node1: 0 +pcmk__group_assign: rsc2:1 allocation score on node2: 1 +pcmk__group_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node1: 0 +pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: 2 +pcmk__primitive_assign: rsc1:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:0 allocation score on node3: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc2:1 allocation score on node2: 1 +pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-9.scores b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores new file mode 100644 index 0000000..eb7a941 --- /dev/null +++ b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores @@ -0,0 +1,67 @@ + +base:0 promotion score on base-bundle-0: 5 +base:1 promotion score on base-bundle-1: 10 +base:2 promotion score on base-bundle-2: 15 +pcmk__bundle_assign: base-bundle allocation score on node1: 0 +pcmk__bundle_assign: base-bundle allocation score on node2: 0 +pcmk__bundle_assign: base-bundle allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-0 allocation score 
on node2: 0 +pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY +pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0 +pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0 +pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501 +pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501 +pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0 +pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0 +pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY +pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY +pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000 +pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000 +pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on 
node1: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0 +pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0 +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY +pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY +pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY +pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY +pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY diff --git a/cts/scheduler/scores/cloned-group-stop.scores b/cts/scheduler/scores/cloned-group-stop.scores index be835fa..7e406c6 100644 --- a/cts/scheduler/scores/cloned-group-stop.scores +++ b/cts/scheduler/scores/cloned-group-stop.scores @@ -122,8 +122,12 @@ pcmk__primitive_assign: mysql-fs allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: mysql-vip allocation score on rhos4-node3: 300 pcmk__primitive_assign: mysql-vip allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY +pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY pcmk__primitive_assign: virt-fencing allocation score on rhos4-node3: 100 pcmk__primitive_assign: virt-fencing allocation score on rhos4-node4: 0 diff --git a/cts/scheduler/scores/coloc-clone-stays-active.scores b/cts/scheduler/scores/coloc-clone-stays-active.scores index 9b46943..52c6bc5 100644 --- a/cts/scheduler/scores/coloc-clone-stays-active.scores +++ b/cts/scheduler/scores/coloc-clone-stays-active.scores @@ -31,7 +31,7 @@ pcmk__clone_assign: cl-drbdlinks-s01-service allocation score on s01-0: 0 pcmk__clone_assign: cl-drbdlinks-s01-service allocation score on s01-1: 0 pcmk__clone_assign: cl-gfs2 allocation score on s01-0: 0 pcmk__clone_assign: cl-gfs2 allocation score on s01-1: 0 -pcmk__clone_assign: cl-ietd allocation score on s01-0: 11001 +pcmk__clone_assign: cl-ietd allocation score on s01-0: 12001 pcmk__clone_assign: cl-ietd allocation score on s01-1: 1000 pcmk__clone_assign: cl-libvirtd allocation score on s01-0: 0 pcmk__clone_assign: cl-libvirtd allocation score on s01-1: 0 @@ -337,16 +337,16 @@ pcmk__primitive_assign: dhcpd:0 allocation score on s01-0: -INFINITY pcmk__primitive_assign: dhcpd:0 allocation score on s01-1: -INFINITY pcmk__primitive_assign: dhcpd:1 allocation score on s01-0: -INFINITY pcmk__primitive_assign: dhcpd:1 allocation score on s01-1: -INFINITY -pcmk__primitive_assign: dlm:0 allocation score on s01-0: 0 +pcmk__primitive_assign: dlm:0 allocation score on s01-0: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on s01-1: 1 pcmk__primitive_assign: dlm:1 allocation score on s01-0: 1 pcmk__primitive_assign: dlm:1 allocation score on s01-1: -INFINITY pcmk__primitive_assign: drbd-pool-0:0 allocation score on s01-0: -INFINITY pcmk__primitive_assign: drbd-pool-0:0 allocation score on s01-1: 10001 -pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-0: 
10001 +pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-0: 11001 pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-1: 0 pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-0: 0 -pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-1: 10001 +pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-1: 11001 pcmk__primitive_assign: drbd-pool-1:1 allocation score on s01-0: 10001 pcmk__primitive_assign: drbd-pool-1:1 allocation score on s01-1: -INFINITY pcmk__primitive_assign: drbd-s01-logs:0 allocation score on s01-0: 0 @@ -357,7 +357,7 @@ pcmk__primitive_assign: drbd-s01-service:0 allocation score on s01-0: 0 pcmk__primitive_assign: drbd-s01-service:0 allocation score on s01-1: 10001 pcmk__primitive_assign: drbd-s01-service:1 allocation score on s01-0: 10001 pcmk__primitive_assign: drbd-s01-service:1 allocation score on s01-1: -INFINITY -pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-0: 0 +pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-0: -INFINITY pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-1: 10001 pcmk__primitive_assign: drbd-s01-vm-data:1 allocation score on s01-0: 10001 pcmk__primitive_assign: drbd-s01-vm-data:1 allocation score on s01-1: -INFINITY @@ -382,22 +382,22 @@ pcmk__primitive_assign: gfs2:0 allocation score on s01-1: 1 pcmk__primitive_assign: gfs2:1 allocation score on s01-0: 1 pcmk__primitive_assign: gfs2:1 allocation score on s01-1: -INFINITY pcmk__primitive_assign: ietd:0 allocation score on s01-0: -INFINITY -pcmk__primitive_assign: ietd:0 allocation score on s01-1: 1 -pcmk__primitive_assign: ietd:1 allocation score on s01-0: 1 -pcmk__primitive_assign: ietd:1 allocation score on s01-1: 0 +pcmk__primitive_assign: ietd:0 allocation score on s01-1: 1001 +pcmk__primitive_assign: ietd:1 allocation score on s01-0: 12002 +pcmk__primitive_assign: ietd:1 allocation score on s01-1: -INFINITY pcmk__primitive_assign: iscsi-pool-0-lun-1 allocation score on s01-0: 0 pcmk__primitive_assign: iscsi-pool-0-lun-1 allocation score on s01-1: -INFINITY -pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-0: 11001 +pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-0: 12001 pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-1: -INFINITY pcmk__primitive_assign: iscsi-pool-1-lun-1 allocation score on s01-0: -INFINITY pcmk__primitive_assign: iscsi-pool-1-lun-1 allocation score on s01-1: 0 pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-0: -INFINITY -pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-1: 11001 +pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-1: 12001 pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-0: -INFINITY pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-1: -INFINITY pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-target allocation score on s01-0: -INFINITY pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-target allocation score on s01-1: -INFINITY -pcmk__primitive_assign: libvirtd:0 allocation score on s01-0: 0 +pcmk__primitive_assign: libvirtd:0 allocation score on s01-0: -INFINITY pcmk__primitive_assign: libvirtd:0 allocation score on s01-1: 1 pcmk__primitive_assign: libvirtd:1 allocation score on s01-0: 1 pcmk__primitive_assign: libvirtd:1 allocation score on s01-1: -INFINITY diff --git a/cts/scheduler/scores/coloc-with-inner-group-member.scores 
b/cts/scheduler/scores/coloc-with-inner-group-member.scores new file mode 100644 index 0000000..8d1c6f6 --- /dev/null +++ b/cts/scheduler/scores/coloc-with-inner-group-member.scores @@ -0,0 +1,46 @@ + +pcmk__group_assign: bar allocation score on rhel8-1: 0 +pcmk__group_assign: bar allocation score on rhel8-2: 0 +pcmk__group_assign: bar allocation score on rhel8-3: 0 +pcmk__group_assign: bar allocation score on rhel8-4: 0 +pcmk__group_assign: bar allocation score on rhel8-5: 0 +pcmk__group_assign: foo allocation score on rhel8-1: 0 +pcmk__group_assign: foo allocation score on rhel8-2: 0 +pcmk__group_assign: foo allocation score on rhel8-3: 0 +pcmk__group_assign: foo allocation score on rhel8-4: 0 +pcmk__group_assign: foo allocation score on rhel8-5: 0 +pcmk__group_assign: grp allocation score on rhel8-1: 0 +pcmk__group_assign: grp allocation score on rhel8-2: 0 +pcmk__group_assign: grp allocation score on rhel8-3: 0 +pcmk__group_assign: grp allocation score on rhel8-4: 0 +pcmk__group_assign: grp allocation score on rhel8-5: 0 +pcmk__group_assign: vip allocation score on rhel8-1: 0 +pcmk__group_assign: vip allocation score on rhel8-2: 0 +pcmk__group_assign: vip allocation score on rhel8-3: 0 +pcmk__group_assign: vip allocation score on rhel8-4: 0 +pcmk__group_assign: vip allocation score on rhel8-5: 0 +pcmk__primitive_assign: Fencing allocation score on rhel8-1: 0 +pcmk__primitive_assign: Fencing allocation score on rhel8-2: 0 +pcmk__primitive_assign: Fencing allocation score on rhel8-3: 0 +pcmk__primitive_assign: Fencing allocation score on rhel8-4: 0 +pcmk__primitive_assign: Fencing allocation score on rhel8-5: 0 +pcmk__primitive_assign: bar allocation score on rhel8-1: -INFINITY +pcmk__primitive_assign: bar allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: bar allocation score on rhel8-3: 0 +pcmk__primitive_assign: bar allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: bar allocation score on rhel8-5: -INFINITY +pcmk__primitive_assign: foo allocation score on rhel8-1: -INFINITY +pcmk__primitive_assign: foo allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: foo allocation score on rhel8-3: 0 +pcmk__primitive_assign: foo allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: foo allocation score on rhel8-5: -INFINITY +pcmk__primitive_assign: vip allocation score on rhel8-1: -INFINITY +pcmk__primitive_assign: vip allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: vip allocation score on rhel8-3: 0 +pcmk__primitive_assign: vip allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: vip allocation score on rhel8-5: -INFINITY +pcmk__primitive_assign: vip-dep allocation score on rhel8-1: 0 +pcmk__primitive_assign: vip-dep allocation score on rhel8-2: 0 +pcmk__primitive_assign: vip-dep allocation score on rhel8-3: 0 +pcmk__primitive_assign: vip-dep allocation score on rhel8-4: 0 +pcmk__primitive_assign: vip-dep allocation score on rhel8-5: 0 diff --git a/cts/scheduler/scores/colocate-primitive-with-clone.scores b/cts/scheduler/scores/colocate-primitive-with-clone.scores index 58b4556..62615f6 100644 --- a/cts/scheduler/scores/colocate-primitive-with-clone.scores +++ b/cts/scheduler/scores/colocate-primitive-with-clone.scores @@ -284,65 +284,65 @@ pcmk__primitive_assign: UmVIPcheck allocation score on srv02: -INFINITY pcmk__primitive_assign: UmVIPcheck allocation score on srv03: -INFINITY pcmk__primitive_assign: UmVIPcheck allocation score on srv04: 100 pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv01: -INFINITY 
-pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 106 +pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 106 pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 100 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: -2994 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: -2994 +pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 109 pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -INFINITY pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -INFINITY pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 106 +pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 106 pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 100 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: -2994 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: -2994 +pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 109 pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: -INFINITY pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -INFINITY pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 106 +pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnPrmDiskd1:0 
allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 106 pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 100 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: -2994 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: -2994 +pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 109 pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -INFINITY pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 100 -pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: 0 +pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 106 +pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: -1000 pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv02: -INFINITY -pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 100 +pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 106 pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv04: -INFINITY pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv01: -INFINITY -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: 0 -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: 0 -pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 100 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: -2994 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: -2994 +pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 109 pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: -INFINITY pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -INFINITY pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -INFINITY diff --git a/cts/scheduler/scores/colocation-influence.scores b/cts/scheduler/scores/colocation-influence.scores index e15bdf5..2eb86ec 100644 --- a/cts/scheduler/scores/colocation-influence.scores +++ b/cts/scheduler/scores/colocation-influence.scores @@ -1,136 +1,136 @@ -pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10 
allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10010 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-3: 10 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-3: 10010 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 10 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 20 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: -INFINITY -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: 0 
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 10 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 20 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle11 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-1: 10 -pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-0: -INFINITY -pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-1: -INFINITY -pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-clone allocation score on 
rhel7-3: 0 -pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-5: 0 -pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-1: 10 -pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10 -pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-2: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY -pcmk__bundle_allocate: bundle11a:0 allocation score on bundle11-0: 510 -pcmk__bundle_allocate: bundle11a:1 allocation score on bundle11-1: 500 -pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: 510 -pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: INFINITY -pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: 510 -pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: INFINITY +pcmk__bundle_assign: bundle10 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-2: 10 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-2: 10010 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle10-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-1 
allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-3: 10 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-3: 10010 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle10-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle10-clone allocation score on bundle10-0: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on bundle10-0: 0 +pcmk__bundle_assign: bundle10-clone allocation score on bundle10-1: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on bundle10-1: 0 +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-1: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-2: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-3: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-4: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-clone allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-2: 10 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-2: 20 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-2: -INFINITY +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-3: 10 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-3: 20 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10 +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-3: 0 
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10 +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle11 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11-0 allocation score on rhel7-1: 10 +pcmk__bundle_assign: bundle11-0 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle11-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle11-1 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle11-clone allocation score on bundle11-0: -INFINITY +pcmk__bundle_assign: bundle11-clone allocation score on bundle11-1: -INFINITY +pcmk__bundle_assign: bundle11-clone allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle11-clone allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-clone allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-clone allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-clone allocation score on rhel7-5: 0 +pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-1: 10 +pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10 +pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-2: 0 
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-1: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-2: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-3: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-4: 0 +pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY +pcmk__bundle_assign: bundle11a:0 allocation score on bundle11-0: 510 +pcmk__bundle_assign: bundle11a:1 allocation score on bundle11-1: 500 +pcmk__bundle_assign: httpd:0 allocation score on bundle10-0: 510 +pcmk__bundle_assign: httpd:0 allocation score on bundle10-0: INFINITY +pcmk__bundle_assign: httpd:1 allocation score on bundle10-1: 510 +pcmk__bundle_assign: httpd:1 allocation score on bundle10-1: INFINITY pcmk__clone_assign: bundle10-clone allocation score on bundle10-0: 0 pcmk__clone_assign: bundle10-clone allocation score on bundle10-1: 0 pcmk__clone_assign: bundle10-clone allocation score on rhel7-1: -INFINITY diff --git a/cts/scheduler/scores/complex_enforce_colo.scores b/cts/scheduler/scores/complex_enforce_colo.scores index 9968e10..a5d0b2b 100644 --- a/cts/scheduler/scores/complex_enforce_colo.scores +++ b/cts/scheduler/scores/complex_enforce_colo.scores @@ -588,13 +588,22 @@ pcmk__primitive_assign: horizon:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: horizon:2 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: horizon:2 allocation score on rhos6-node3: 1 pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node1: 1 pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node2: 0 diff --git a/cts/scheduler/scores/enforce-colo1.scores b/cts/scheduler/scores/enforce-colo1.scores index 8194789..262cbd9 100644 --- a/cts/scheduler/scores/enforce-colo1.scores +++ 
b/cts/scheduler/scores/enforce-colo1.scores @@ -18,13 +18,22 @@ pcmk__primitive_assign: engine allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: engine allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: engine allocation score on rhel7-auto3: 0 pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY +pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY pcmk__primitive_assign: shooter allocation score on rhel7-auto1: 0 pcmk__primitive_assign: shooter allocation score on rhel7-auto2: 0 diff --git a/cts/scheduler/scores/group-anticolocation-2.scores b/cts/scheduler/scores/group-anticolocation-2.scores new file mode 100644 index 0000000..ab0a4c9 --- /dev/null +++ b/cts/scheduler/scores/group-anticolocation-2.scores @@ -0,0 +1,23 @@ + +pcmk__group_assign: group1 allocation score on node1: 0 +pcmk__group_assign: group1 allocation score on node2: 0 +pcmk__group_assign: group2 allocation score on node1: 0 +pcmk__group_assign: group2 allocation score on node2: 0 +pcmk__group_assign: member1a allocation score on node1: 0 +pcmk__group_assign: member1a allocation score on node2: INFINITY +pcmk__group_assign: member1b allocation score on node1: 0 +pcmk__group_assign: member1b allocation score on node2: INFINITY +pcmk__group_assign: member2a allocation score on node1: INFINITY +pcmk__group_assign: member2a allocation score on node2: 0 +pcmk__group_assign: member2b allocation score on node1: -INFINITY +pcmk__group_assign: member2b allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node1: INFINITY +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: member1a allocation score on node1: -5000 +pcmk__primitive_assign: member1a allocation score on node2: INFINITY +pcmk__primitive_assign: member1b allocation score on node1: -INFINITY +pcmk__primitive_assign: member1b allocation score on node2: INFINITY +pcmk__primitive_assign: member2a allocation score on node1: -INFINITY +pcmk__primitive_assign: member2a allocation score on node2: 0 +pcmk__primitive_assign: member2b allocation score on node1: -INFINITY +pcmk__primitive_assign: member2b allocation score on node2: 0 diff --git a/cts/scheduler/scores/group-anticolocation-3.scores 
b/cts/scheduler/scores/group-anticolocation-3.scores new file mode 100644 index 0000000..5b2b8e4 --- /dev/null +++ b/cts/scheduler/scores/group-anticolocation-3.scores @@ -0,0 +1,23 @@ + +pcmk__group_assign: group1 allocation score on node1: 0 +pcmk__group_assign: group1 allocation score on node2: 0 +pcmk__group_assign: group2 allocation score on node1: 0 +pcmk__group_assign: group2 allocation score on node2: 0 +pcmk__group_assign: member1a allocation score on node1: 0 +pcmk__group_assign: member1a allocation score on node2: INFINITY +pcmk__group_assign: member1b allocation score on node1: 0 +pcmk__group_assign: member1b allocation score on node2: INFINITY +pcmk__group_assign: member2a allocation score on node1: INFINITY +pcmk__group_assign: member2a allocation score on node2: 0 +pcmk__group_assign: member2b allocation score on node1: -INFINITY +pcmk__group_assign: member2b allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node1: INFINITY +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: member1a allocation score on node1: -INFINITY +pcmk__primitive_assign: member1a allocation score on node2: INFINITY +pcmk__primitive_assign: member1b allocation score on node1: -INFINITY +pcmk__primitive_assign: member1b allocation score on node2: INFINITY +pcmk__primitive_assign: member2a allocation score on node1: INFINITY +pcmk__primitive_assign: member2a allocation score on node2: -INFINITY +pcmk__primitive_assign: member2b allocation score on node1: -INFINITY +pcmk__primitive_assign: member2b allocation score on node2: -INFINITY diff --git a/cts/scheduler/scores/group-anticolocation-4.scores b/cts/scheduler/scores/group-anticolocation-4.scores new file mode 100644 index 0000000..4449511 --- /dev/null +++ b/cts/scheduler/scores/group-anticolocation-4.scores @@ -0,0 +1,23 @@ + +pcmk__group_assign: group1 allocation score on node1: 0 +pcmk__group_assign: group1 allocation score on node2: 0 +pcmk__group_assign: group2 allocation score on node1: 0 +pcmk__group_assign: group2 allocation score on node2: 0 +pcmk__group_assign: member1a allocation score on node1: 0 +pcmk__group_assign: member1a allocation score on node2: 0 +pcmk__group_assign: member1b allocation score on node1: 0 +pcmk__group_assign: member1b allocation score on node2: 0 +pcmk__group_assign: member2a allocation score on node1: 0 +pcmk__group_assign: member2a allocation score on node2: 0 +pcmk__group_assign: member2b allocation score on node1: -INFINITY +pcmk__group_assign: member2b allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: member1a allocation score on node1: 0 +pcmk__primitive_assign: member1a allocation score on node2: 0 +pcmk__primitive_assign: member1b allocation score on node1: -INFINITY +pcmk__primitive_assign: member1b allocation score on node2: 0 +pcmk__primitive_assign: member2a allocation score on node1: -INFINITY +pcmk__primitive_assign: member2a allocation score on node2: 0 +pcmk__primitive_assign: member2b allocation score on node1: -INFINITY +pcmk__primitive_assign: member2b allocation score on node2: 0 diff --git a/cts/scheduler/scores/group-anticolocation-5.scores b/cts/scheduler/scores/group-anticolocation-5.scores new file mode 100644 index 0000000..2af165f --- /dev/null +++ b/cts/scheduler/scores/group-anticolocation-5.scores @@ -0,0 +1,34 @@ + +pcmk__group_assign: group1 allocation score on node1: 0 +pcmk__group_assign: 
group1 allocation score on node2: 0 +pcmk__group_assign: group1 allocation score on node3: 0 +pcmk__group_assign: group2 allocation score on node1: 0 +pcmk__group_assign: group2 allocation score on node2: 0 +pcmk__group_assign: group2 allocation score on node3: 0 +pcmk__group_assign: member1a allocation score on node1: 0 +pcmk__group_assign: member1a allocation score on node2: 0 +pcmk__group_assign: member1a allocation score on node3: 0 +pcmk__group_assign: member1b allocation score on node1: 0 +pcmk__group_assign: member1b allocation score on node2: 0 +pcmk__group_assign: member1b allocation score on node3: 0 +pcmk__group_assign: member2a allocation score on node1: 0 +pcmk__group_assign: member2a allocation score on node2: 0 +pcmk__group_assign: member2a allocation score on node3: 0 +pcmk__group_assign: member2b allocation score on node1: -INFINITY +pcmk__group_assign: member2b allocation score on node2: 0 +pcmk__group_assign: member2b allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: member1a allocation score on node1: 0 +pcmk__primitive_assign: member1a allocation score on node2: 0 +pcmk__primitive_assign: member1a allocation score on node3: 0 +pcmk__primitive_assign: member1b allocation score on node1: -INFINITY +pcmk__primitive_assign: member1b allocation score on node2: 0 +pcmk__primitive_assign: member1b allocation score on node3: -INFINITY +pcmk__primitive_assign: member2a allocation score on node1: -INFINITY +pcmk__primitive_assign: member2a allocation score on node2: -5000 +pcmk__primitive_assign: member2a allocation score on node3: 0 +pcmk__primitive_assign: member2b allocation score on node1: -INFINITY +pcmk__primitive_assign: member2b allocation score on node2: -INFINITY +pcmk__primitive_assign: member2b allocation score on node3: 0 diff --git a/cts/scheduler/scores/group-anticolocation.scores b/cts/scheduler/scores/group-anticolocation.scores index 4449511..5d38fa1 100644 --- a/cts/scheduler/scores/group-anticolocation.scores +++ b/cts/scheduler/scores/group-anticolocation.scores @@ -13,10 +13,10 @@ pcmk__group_assign: member2b allocation score on node1: -INFINITY pcmk__group_assign: member2b allocation score on node2: 0 pcmk__primitive_assign: Fencing allocation score on node1: 0 pcmk__primitive_assign: Fencing allocation score on node2: 0 -pcmk__primitive_assign: member1a allocation score on node1: 0 +pcmk__primitive_assign: member1a allocation score on node1: 5000 pcmk__primitive_assign: member1a allocation score on node2: 0 -pcmk__primitive_assign: member1b allocation score on node1: -INFINITY -pcmk__primitive_assign: member1b allocation score on node2: 0 +pcmk__primitive_assign: member1b allocation score on node1: 5000 +pcmk__primitive_assign: member1b allocation score on node2: -INFINITY pcmk__primitive_assign: member2a allocation score on node1: -INFINITY pcmk__primitive_assign: member2a allocation score on node2: 0 pcmk__primitive_assign: member2b allocation score on node1: -INFINITY diff --git a/cts/scheduler/scores/group-dependents.scores b/cts/scheduler/scores/group-dependents.scores index 22ed3e5..ece06a3 100644 --- a/cts/scheduler/scores/group-dependents.scores +++ b/cts/scheduler/scores/group-dependents.scores @@ -57,14 +57,16 @@ pcmk__primitive_assign: asterisk allocation score on asttest1: -INFINITY pcmk__primitive_assign: asterisk allocation score on asttest2: 0 
pcmk__primitive_assign: dahdi allocation score on asttest1: -INFINITY pcmk__primitive_assign: dahdi allocation score on asttest2: 0 -pcmk__primitive_assign: drbd:0 allocation score on asttest1: 6 -pcmk__primitive_assign: drbd:0 allocation score on asttest2: 0 +pcmk__primitive_assign: drbd:0 allocation score on asttest1: -INFINITY +pcmk__primitive_assign: drbd:0 allocation score on asttest1: 8 +pcmk__primitive_assign: drbd:0 allocation score on asttest2: -INFINITY +pcmk__primitive_assign: drbd:0 allocation score on asttest2: 1 pcmk__primitive_assign: drbd:1 allocation score on asttest1: -INFINITY -pcmk__primitive_assign: drbd:1 allocation score on asttest2: 6 +pcmk__primitive_assign: drbd:1 allocation score on asttest2: 7 pcmk__primitive_assign: fonulator allocation score on asttest1: -INFINITY pcmk__primitive_assign: fonulator allocation score on asttest2: 0 pcmk__primitive_assign: fs_drbd allocation score on asttest1: -INFINITY -pcmk__primitive_assign: fs_drbd allocation score on asttest2: 7 +pcmk__primitive_assign: fs_drbd allocation score on asttest2: 8 pcmk__primitive_assign: httpd allocation score on asttest1: -INFINITY pcmk__primitive_assign: httpd allocation score on asttest2: 0 pcmk__primitive_assign: iax2_mon allocation score on asttest1: -INFINITY diff --git a/cts/scheduler/scores/guest-host-not-fenceable.scores b/cts/scheduler/scores/guest-host-not-fenceable.scores index e4c7fc2..21f5daa 100644 --- a/cts/scheduler/scores/guest-host-not-fenceable.scores +++ b/cts/scheduler/scores/guest-host-not-fenceable.scores @@ -1,67 +1,67 @@ galera:0 promotion score on galera-bundle-0: 100 -galera:1 promotion score on galera-bundle-1: 100 +galera:1 promotion score on galera-bundle-1: -1 galera:2 promotion score on galera-bundle-2: -1 -pcmk__bundle_allocate: galera-bundle allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on node3: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on node1: INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on node2: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on node3: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node3: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node3: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node3: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score 
on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on node1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on node2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on node3: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node3: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node3: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node3: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle allocation score on node3: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on node1: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on node2: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle-2 
allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on node3: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node3: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node3: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node3: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on node1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on node2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on node3: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node3: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node3: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node3: INFINITY 
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0 diff --git a/cts/scheduler/scores/load-stopped-loop-2.scores b/cts/scheduler/scores/load-stopped-loop-2.scores index 0b28a72..704ca03 100644 --- a/cts/scheduler/scores/load-stopped-loop-2.scores +++ b/cts/scheduler/scores/load-stopped-loop-2.scores @@ -42,7 +42,7 @@ pcmk__clone_assign: p_glusterd:3 allocation score on xfc3: 0 pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc0: -INFINITY pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc1: -INFINITY pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc2: -INFINITY -pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc3: 1 +pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc3: INFINITY pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc0: 1 pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc1: -INFINITY pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc2: -INFINITY @@ -58,7 +58,7 @@ pcmk__primitive_assign: p_bl_glusterfs:3 allocation score on xfc3: -INFINITY pcmk__primitive_assign: p_glusterd:0 allocation score on xfc0: 0 pcmk__primitive_assign: p_glusterd:0 allocation score on xfc1: 0 pcmk__primitive_assign: p_glusterd:0 allocation score on xfc2: 0 -pcmk__primitive_assign: p_glusterd:0 allocation score on xfc3: 1 +pcmk__primitive_assign: p_glusterd:0 allocation score on xfc3: INFINITY pcmk__primitive_assign: p_glusterd:1 allocation score on xfc0: 1 pcmk__primitive_assign: p_glusterd:1 allocation score on xfc1: 0 pcmk__primitive_assign: p_glusterd:1 allocation score on xfc2: 0 diff --git a/cts/scheduler/scores/load-stopped-loop.scores b/cts/scheduler/scores/load-stopped-loop.scores index 1a35119..17f8b48 100644 --- a/cts/scheduler/scores/load-stopped-loop.scores +++ b/cts/scheduler/scores/load-stopped-loop.scores @@ -881,8 +881,14 @@ pcmk__primitive_assign: dist.express-consult.org-vm allocation score on v03-b: - pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-b: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: dlm:0 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on v03-a: 0 +pcmk__primitive_assign: dlm:0 allocation score on v03-b: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0 pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0 pcmk__primitive_assign: dlm:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on v03-a: 0 @@ -932,8 +938,14 @@ pcmk__primitive_assign: gw.gleb.vds-ok.com-vm allocation score on v03-b: -INFINI pcmk__primitive_assign: gw.gotin.vds-ok.com-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: 
gw.gotin.vds-ok.com-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: gw.gotin.vds-ok.com-vm allocation score on v03-b: -INFINITY +pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: iscsid:0 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: iscsid:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: iscsid:0 allocation score on v03-a: 0 +pcmk__primitive_assign: iscsid:0 allocation score on v03-b: -INFINITY +pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0 pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0 pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: iscsid:1 allocation score on v03-a: 0 @@ -965,8 +977,14 @@ pcmk__primitive_assign: ktstudio.net-vm allocation score on v03-b: 0 pcmk__primitive_assign: lenny-x32-devel-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-a: 0 pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-b: 0 +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: 0 +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: -INFINITY +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0 pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0 pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: 0 @@ -1017,7 +1035,13 @@ pcmk__primitive_assign: libvirt-images-pool:7 allocation score on mgmt01: -INFIN pcmk__primitive_assign: libvirt-images-pool:7 allocation score on v03-a: -INFINITY pcmk__primitive_assign: libvirt-images-pool:7 allocation score on v03-b: -INFINITY pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: 0 +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: -INFINITY +pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: 0 pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: 0 pcmk__primitive_assign: libvirt-install-fs:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-install-fs:1 allocation score on v03-a: 0 @@ -1196,8 +1220,14 @@ pcmk__primitive_assign: mcast-test-net:7 allocation score on v03-b: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on v03-b: -INFINITY +pcmk__primitive_assign: multipathd:0 
allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: 1 +pcmk__primitive_assign: multipathd:0 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: multipathd:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: multipathd:0 allocation score on v03-a: 0 +pcmk__primitive_assign: multipathd:0 allocation score on v03-b: -INFINITY +pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0 pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0 pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:1 allocation score on v03-a: 0 diff --git a/cts/scheduler/scores/migrate-begin.scores b/cts/scheduler/scores/migrate-begin.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-begin.scores +++ b/cts/scheduler/scores/migrate-begin.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-2.scores b/cts/scheduler/scores/migrate-fail-2.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-2.scores +++ b/cts/scheduler/scores/migrate-fail-2.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-3.scores b/cts/scheduler/scores/migrate-fail-3.scores index 159b82b..b75abc0 100644 --- a/cts/scheduler/scores/migrate-fail-3.scores +++ b/cts/scheduler/scores/migrate-fail-3.scores @@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2 +pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY pcmk__primitive_assign: test-vm allocation score on hex-13: 1 pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-4.scores b/cts/scheduler/scores/migrate-fail-4.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-4.scores +++ b/cts/scheduler/scores/migrate-fail-4.scores @@ 
-5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-5.scores b/cts/scheduler/scores/migrate-fail-5.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-5.scores +++ b/cts/scheduler/scores/migrate-fail-5.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-6.scores b/cts/scheduler/scores/migrate-fail-6.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-6.scores +++ b/cts/scheduler/scores/migrate-fail-6.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-7.scores b/cts/scheduler/scores/migrate-fail-7.scores index 159b82b..b75abc0 100644 --- a/cts/scheduler/scores/migrate-fail-7.scores +++ b/cts/scheduler/scores/migrate-fail-7.scores @@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2 +pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY pcmk__primitive_assign: test-vm allocation score on hex-13: 1 pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-8.scores b/cts/scheduler/scores/migrate-fail-8.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-8.scores +++ b/cts/scheduler/scores/migrate-fail-8.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on 
hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-fail-9.scores b/cts/scheduler/scores/migrate-fail-9.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-fail-9.scores +++ b/cts/scheduler/scores/migrate-fail-9.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-partial-1.scores b/cts/scheduler/scores/migrate-partial-1.scores index 159b82b..b75abc0 100644 --- a/cts/scheduler/scores/migrate-partial-1.scores +++ b/cts/scheduler/scores/migrate-partial-1.scores @@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2 +pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY pcmk__primitive_assign: test-vm allocation score on hex-13: 1 pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-partial-2.scores b/cts/scheduler/scores/migrate-partial-2.scores index 4763646..7d0c5c3 100644 --- a/cts/scheduler/scores/migrate-partial-2.scores +++ b/cts/scheduler/scores/migrate-partial-2.scores @@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0 pcmk__clone_assign: dlm:0 allocation score on hex-14: 1 pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/migrate-partial-3.scores b/cts/scheduler/scores/migrate-partial-3.scores index cfcd402..cec2f31 100644 --- a/cts/scheduler/scores/migrate-partial-3.scores +++ b/cts/scheduler/scores/migrate-partial-3.scores @@ -11,9 +11,12 @@ pcmk__clone_assign: dlm:1 allocation score on hex-15: 0 pcmk__clone_assign: dlm:2 allocation score on hex-13: 0 pcmk__clone_assign: dlm:2 allocation score on hex-14: 0 
pcmk__clone_assign: dlm:2 allocation score on hex-15: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0 +pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 pcmk__primitive_assign: dlm:0 allocation score on hex-15: -INFINITY +pcmk__primitive_assign: dlm:0 allocation score on hex-15: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on hex-15: -INFINITY diff --git a/cts/scheduler/scores/migrate-start-complex.scores b/cts/scheduler/scores/migrate-start-complex.scores index 31f46d3..859664c 100644 --- a/cts/scheduler/scores/migrate-start-complex.scores +++ b/cts/scheduler/scores/migrate-start-complex.scores @@ -15,21 +15,27 @@ pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-01: 0 pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-01: 0 pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-02: 0 +pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 +pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY +pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: -INFINITY pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 +pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY -pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 0 -pcmk__primitive_assign: bottom:0 allocation score on dom0-02: 0 +pcmk__primitive_assign: bottom:0 allocation score on dom0-01: INFINITY +pcmk__primitive_assign: bottom:0 allocation score on dom0-02: 10000 pcmk__primitive_assign: bottom:1 allocation score on dom0-01: -INFINITY -pcmk__primitive_assign: bottom:1 allocation score on dom0-02: 0 -pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0 -pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 -pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0 +pcmk__primitive_assign: bottom:1 allocation score on dom0-02: 10000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: INFINITY +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 15000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 15000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: INFINITY pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY pcmk__primitive_assign: domU-test01 allocation score on dom0-01: INFINITY pcmk__primitive_assign: domU-test01 allocation score on dom0-02: 10000 diff --git a/cts/scheduler/scores/migrate-start.scores b/cts/scheduler/scores/migrate-start.scores index 277e152..3cc2f29 100644 --- a/cts/scheduler/scores/migrate-start.scores +++ 
b/cts/scheduler/scores/migrate-start.scores @@ -9,17 +9,23 @@ pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-01: 0 pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-01: 0 pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-02: 0 +pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 +pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY +pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: -INFINITY pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 +pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0 pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY -pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0 -pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000 -pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0 +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: INFINITY +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 10000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 10000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: INFINITY pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY pcmk__primitive_assign: domU-test01 allocation score on dom0-01: INFINITY pcmk__primitive_assign: domU-test01 allocation score on dom0-02: 5000 diff --git a/cts/scheduler/scores/migrate-stop-start-complex.scores b/cts/scheduler/scores/migrate-stop-start-complex.scores index 78a5dc3..62ff5f2 100644 --- a/cts/scheduler/scores/migrate-stop-start-complex.scores +++ b/cts/scheduler/scores/migrate-stop-start-complex.scores @@ -23,11 +23,11 @@ pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0 pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: -INFINITY pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: -INFINITY pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY -pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 0 +pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 5000 pcmk__primitive_assign: bottom:0 allocation score on dom0-02: -INFINITY pcmk__primitive_assign: bottom:1 allocation score on dom0-01: -INFINITY pcmk__primitive_assign: bottom:1 allocation score on dom0-02: -INFINITY -pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 5000 +pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 10000 pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: -INFINITY pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: -INFINITY pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY diff --git a/cts/scheduler/scores/migrate-success.scores b/cts/scheduler/scores/migrate-success.scores index 159b82b..b75abc0 100644 --- a/cts/scheduler/scores/migrate-success.scores +++ 
b/cts/scheduler/scores/migrate-success.scores @@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1 pcmk__clone_assign: dlm:1 allocation score on hex-14: 0 pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1 -pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0 +pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2 +pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY pcmk__primitive_assign: test-vm allocation score on hex-13: 1 pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY diff --git a/cts/scheduler/scores/nested-remote-recovery.scores b/cts/scheduler/scores/nested-remote-recovery.scores index bfbd8ba..e872849 100644 --- a/cts/scheduler/scores/nested-remote-recovery.scores +++ b/cts/scheduler/scores/nested-remote-recovery.scores @@ -2,330 +2,330 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-2 
allocation score on database-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-0: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation 
score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: 
-INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on 
database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on 
messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 
allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: 
redis-bundle-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY 
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-0: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on 
database-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY 
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 
allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: 
openstack-cinder-volume-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0 
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation 
score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-1: 
-INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores index 7923cdc..12f4c7f 100644 --- a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores +++ b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores @@ -2,135 +2,135 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5 +ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1 ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY 
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY -pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY -pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 
allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score 
on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation 
score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY +pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY +pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY 
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/scores/node-pending-timeout.scores b/cts/scheduler/scores/node-pending-timeout.scores new file mode 100644 index 0000000..90a7c8b --- /dev/null +++ b/cts/scheduler/scores/node-pending-timeout.scores @@ -0,0 +1,3 @@ + +pcmk__primitive_assign: st-sbd allocation score on node-1: 0 +pcmk__primitive_assign: st-sbd allocation score on node-2: 0 diff --git a/cts/scheduler/scores/notifs-for-unrunnable.scores b/cts/scheduler/scores/notifs-for-unrunnable.scores index dd823a4..95d0f7b 100644 --- a/cts/scheduler/scores/notifs-for-unrunnable.scores +++ b/cts/scheduler/scores/notifs-for-unrunnable.scores @@ -2,120 +2,120 @@ galera:0 promotion score on galera-bundle-0: -1 galera:1 
promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 
allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle 
allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation 
score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: 
redis-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY @@ -169,7 +169,7 @@ pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-1 pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY diff --git a/cts/scheduler/scores/notify-behind-stopping-remote.scores b/cts/scheduler/scores/notify-behind-stopping-remote.scores index 015404a..17a5ca9 100644 --- a/cts/scheduler/scores/notify-behind-stopping-remote.scores +++ b/cts/scheduler/scores/notify-behind-stopping-remote.scores @@ -1,34 +1,34 @@ -pcmk__bundle_allocate: redis-bundle allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on ra2: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on ra2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on ra2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on ra2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 
allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on ra2: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on ra1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on ra2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on ra3: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__bundle_assign: redis-bundle allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle allocation score on ra2: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on ra2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on ra2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on ra2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra2: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on ra1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on ra2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on ra3: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501 pcmk__clone_assign: redis-bundle-master allocation score on ra1: -INFINITY pcmk__clone_assign: redis-bundle-master allocation score on ra2: -INFINITY pcmk__clone_assign: redis-bundle-master allocation score on ra3: -INFINITY diff --git a/cts/scheduler/scores/novell-239087.scores b/cts/scheduler/scores/novell-239087.scores index 7da4f35..34dbc1d 100644 --- a/cts/scheduler/scores/novell-239087.scores +++ b/cts/scheduler/scores/novell-239087.scores @@ -9,7 +9,7 @@ pcmk__clone_assign: ms-drbd0 allocation score on xen-1: 100 pcmk__clone_assign: ms-drbd0 
allocation score on xen-2: 0 pcmk__primitive_assign: drbd0:0 allocation score on xen-1: -INFINITY pcmk__primitive_assign: drbd0:0 allocation score on xen-2: 110 -pcmk__primitive_assign: drbd0:1 allocation score on xen-1: 110 +pcmk__primitive_assign: drbd0:1 allocation score on xen-1: 210 pcmk__primitive_assign: drbd0:1 allocation score on xen-2: 0 -pcmk__primitive_assign: fs_1 allocation score on xen-1: 210 +pcmk__primitive_assign: fs_1 allocation score on xen-1: 310 pcmk__primitive_assign: fs_1 allocation score on xen-2: -INFINITY diff --git a/cts/scheduler/scores/on_fail_demote1.scores b/cts/scheduler/scores/on_fail_demote1.scores index 8810211..2a22478 100644 --- a/cts/scheduler/scores/on_fail_demote1.scores +++ b/cts/scheduler/scores/on_fail_demote1.scores @@ -4,89 +4,89 @@ bundled:1 promotion score on stateful-bundle-1: 5 bundled:2 promotion score on stateful-bundle-2: 5 lxc-ms:0 promotion score on lxc2: INFINITY lxc-ms:1 promotion score on lxc1: INFINITY -pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501 -pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501 -pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on 
remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY +pcmk__bundle_assign: bundled:0 allocation score on stateful-bundle-0: 501 +pcmk__bundle_assign: bundled:1 allocation score on stateful-bundle-1: 501 +pcmk__bundle_assign: bundled:2 allocation score on stateful-bundle-2: 501 +pcmk__bundle_assign: stateful-bundle allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc1: 0 +pcmk__bundle_assign: 
stateful-bundle-docker-1 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY +pcmk__bundle_assign: stateful-bundle-master allocation score on 
stateful-bundle-1: -INFINITY +pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY pcmk__clone_assign: bundled:0 allocation score on stateful-bundle-0: INFINITY pcmk__clone_assign: bundled:1 allocation score on stateful-bundle-1: INFINITY pcmk__clone_assign: bundled:2 allocation score on stateful-bundle-2: INFINITY diff --git a/cts/scheduler/scores/on_fail_demote4.scores b/cts/scheduler/scores/on_fail_demote4.scores index cff13e7..b4896e1 100644 --- a/cts/scheduler/scores/on_fail_demote4.scores +++ b/cts/scheduler/scores/on_fail_demote4.scores @@ -4,89 +4,89 @@ bundled:1 promotion score on stateful-bundle-1: 5 bundled:2 promotion score on stateful-bundle-2: 5 lxc-ms:0 promotion score on lxc2: INFINITY lxc-ms:1 promotion score on lxc1: INFINITY -pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501 -pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501 -pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on 
rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY +pcmk__bundle_assign: bundled:0 allocation score on stateful-bundle-0: 501 +pcmk__bundle_assign: bundled:1 allocation score on stateful-bundle-1: 501 +pcmk__bundle_assign: bundled:2 allocation score on stateful-bundle-2: 501 +pcmk__bundle_assign: stateful-bundle allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc1: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc2: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: 
stateful-bundle-docker-1 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on lxc1: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on lxc2: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on remote-rhel7-2: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-1: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-3: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-4: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-5: 0 +pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY +pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY +pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY pcmk__clone_assign: bundled:0 allocation score on 
stateful-bundle-0: INFINITY pcmk__clone_assign: bundled:1 allocation score on stateful-bundle-1: INFINITY pcmk__clone_assign: bundled:2 allocation score on stateful-bundle-2: INFINITY diff --git a/cts/scheduler/scores/order-expired-failure.scores b/cts/scheduler/scores/order-expired-failure.scores index 1605ec0..a2fe598 100644 --- a/cts/scheduler/scores/order-expired-failure.scores +++ b/cts/scheduler/scores/order-expired-failure.scores @@ -2,194 +2,194 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: 
galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on 
overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation 
score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score 
on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501 +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume 
allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: 
rabbitmq-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 
allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/scores/params-6.scores b/cts/scheduler/scores/params-6.scores index 39d172c..b6d28af 100644 --- a/cts/scheduler/scores/params-6.scores +++ b/cts/scheduler/scores/params-6.scores @@ -827,11 +827,14 @@ pcmk__primitive_assign: dist.express-consult.org-vm allocation score on v03-b: - pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-b: -INFINITY -pcmk__primitive_assign: dlm:0 allocation score on mgmt01: 0 +pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dlm:0 allocation score on v03-a: 1 pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0 +pcmk__primitive_assign: dlm:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on mgmt01: 1 pcmk__primitive_assign: dlm:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: dlm:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: dlm:1 allocation score on v03-b: -INFINITY pcmk__primitive_assign: dlm:1 allocation score on v03-b: 0 pcmk__primitive_assign: dlm:2 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: dlm:2 allocation score on v03-a: -INFINITY @@ -869,11 +872,14 @@ pcmk__primitive_assign: git.vds-ok.com-vm allocation score on v03-b: -INFINITY pcmk__primitive_assign: gotin-bbb-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: gotin-bbb-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: gotin-bbb-vm allocation score on v03-b: -INFINITY -pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: 0 +pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: iscsid:0 allocation score on v03-a: 1 pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0 +pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: 1 pcmk__primitive_assign: 
iscsid:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: iscsid:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: iscsid:1 allocation score on v03-b: -INFINITY pcmk__primitive_assign: iscsid:1 allocation score on v03-b: 0 pcmk__primitive_assign: iscsid:2 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: iscsid:2 allocation score on v03-a: -INFINITY @@ -899,11 +905,14 @@ pcmk__primitive_assign: iscsid:8 allocation score on v03-b: -INFINITY pcmk__primitive_assign: lenny-x32-devel-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-a: 0 pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-b: 0 -pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: 0 +pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: 1 pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0 +pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: 1 pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-b: -INFINITY pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-b: 0 pcmk__primitive_assign: libvirt-images-fs:2 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: libvirt-images-fs:2 allocation score on v03-a: -INFINITY @@ -1055,11 +1064,14 @@ pcmk__primitive_assign: mcast-test-net:7 allocation score on v03-b: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on mgmt01: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on v03-a: -INFINITY pcmk__primitive_assign: metae.ru-vm allocation score on v03-b: -INFINITY -pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: 0 +pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:0 allocation score on v03-a: 1 pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0 +pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: 1 pcmk__primitive_assign: multipathd:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: multipathd:1 allocation score on v03-a: -INFINITY +pcmk__primitive_assign: multipathd:1 allocation score on v03-b: -INFINITY pcmk__primitive_assign: multipathd:1 allocation score on v03-b: 0 pcmk__primitive_assign: multipathd:2 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:2 allocation score on v03-a: -INFINITY @@ -1083,7 +1095,10 @@ pcmk__primitive_assign: multipathd:8 allocation score on mgmt01: -INFINITY pcmk__primitive_assign: multipathd:8 allocation score on v03-a: -INFINITY pcmk__primitive_assign: multipathd:8 allocation score on v03-b: -INFINITY pcmk__primitive_assign: node-params:0 allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: node-params:0 allocation score on mgmt01: -INFINITY +pcmk__primitive_assign: node-params:0 allocation score on v03-a: -INFINITY pcmk__primitive_assign: node-params:0 allocation score on v03-a: 1 +pcmk__primitive_assign: node-params:0 allocation score on v03-b: -INFINITY pcmk__primitive_assign: node-params:0 allocation score on v03-b: 0 pcmk__primitive_assign: node-params:1 
allocation score on mgmt01: -INFINITY pcmk__primitive_assign: node-params:1 allocation score on v03-a: -INFINITY diff --git a/cts/scheduler/scores/pending-node-no-uname.scores b/cts/scheduler/scores/pending-node-no-uname.scores new file mode 100644 index 0000000..90a7c8b --- /dev/null +++ b/cts/scheduler/scores/pending-node-no-uname.scores @@ -0,0 +1,3 @@ + +pcmk__primitive_assign: st-sbd allocation score on node-1: 0 +pcmk__primitive_assign: st-sbd allocation score on node-2: 0 diff --git a/cts/scheduler/scores/probe-2.scores b/cts/scheduler/scores/probe-2.scores index d396171..d3b50ce 100644 --- a/cts/scheduler/scores/probe-2.scores +++ b/cts/scheduler/scores/probe-2.scores @@ -137,7 +137,7 @@ pcmk__primitive_assign: mysql-proxy:1 allocation score on wc01: -INFINITY pcmk__primitive_assign: mysql-proxy:1 allocation score on wc02: -INFINITY pcmk__primitive_assign: mysql-server allocation score on wc01: 0 pcmk__primitive_assign: mysql-server allocation score on wc02: -INFINITY -pcmk__primitive_assign: nfs-common:0 allocation score on wc01: 1 +pcmk__primitive_assign: nfs-common:0 allocation score on wc01: 77 pcmk__primitive_assign: nfs-common:0 allocation score on wc02: -INFINITY pcmk__primitive_assign: nfs-common:1 allocation score on wc01: -INFINITY pcmk__primitive_assign: nfs-common:1 allocation score on wc02: -INFINITY diff --git a/cts/scheduler/scores/promoted-13.scores b/cts/scheduler/scores/promoted-13.scores index 5ee6994..19b299c 100644 --- a/cts/scheduler/scores/promoted-13.scores +++ b/cts/scheduler/scores/promoted-13.scores @@ -18,7 +18,7 @@ pcmk__primitive_assign: IPaddr0 allocation score on frigg: -INFINITY pcmk__primitive_assign: IPaddr0 allocation score on odin: INFINITY pcmk__primitive_assign: MailTo allocation score on frigg: -INFINITY pcmk__primitive_assign: MailTo allocation score on odin: 0 -pcmk__primitive_assign: drbd0:0 allocation score on frigg: 0 +pcmk__primitive_assign: drbd0:0 allocation score on frigg: -INFINITY pcmk__primitive_assign: drbd0:0 allocation score on odin: INFINITY pcmk__primitive_assign: drbd0:1 allocation score on frigg: INFINITY pcmk__primitive_assign: drbd0:1 allocation score on odin: -INFINITY diff --git a/cts/scheduler/scores/promoted-asymmetrical-order.scores b/cts/scheduler/scores/promoted-asymmetrical-order.scores index 382e0eb..18bc704 100644 --- a/cts/scheduler/scores/promoted-asymmetrical-order.scores +++ b/cts/scheduler/scores/promoted-asymmetrical-order.scores @@ -12,8 +12,12 @@ pcmk__clone_assign: rsc2:0 allocation score on node2: 0 pcmk__clone_assign: rsc2:1 allocation score on node1: 0 pcmk__clone_assign: rsc2:1 allocation score on node2: 1 pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 pcmk__primitive_assign: rsc2:0 allocation score on node2: 0 diff --git a/cts/scheduler/scores/promoted-demote.scores b/cts/scheduler/scores/promoted-demote.scores index 0a04576..a0ddf9a 100644 --- a/cts/scheduler/scores/promoted-demote.scores +++ b/cts/scheduler/scores/promoted-demote.scores @@ -30,10 
+30,10 @@ pcmk__clone_assign: pingd_node:1 allocation score on cxa1: 0 pcmk__clone_assign: pingd_node:1 allocation score on cxb1: 1 pcmk__primitive_assign: cyrus_address allocation score on cxa1: 210 pcmk__primitive_assign: cyrus_address allocation score on cxb1: 200 -pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxa1: 76 -pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxb1: 0 +pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxa1: 286 +pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxb1: 200 pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxa1: -INFINITY -pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxb1: 76 +pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxb1: 276 pcmk__primitive_assign: cyrus_filesys allocation score on cxa1: -INFINITY pcmk__primitive_assign: cyrus_filesys allocation score on cxb1: -INFINITY pcmk__primitive_assign: cyrus_master allocation score on cxa1: -INFINITY @@ -50,10 +50,10 @@ pcmk__primitive_assign: named_address allocation score on cxa1: 200 pcmk__primitive_assign: named_address allocation score on cxb1: 210 pcmk__primitive_assign: named_daemon allocation score on cxa1: -INFINITY pcmk__primitive_assign: named_daemon allocation score on cxb1: -INFINITY -pcmk__primitive_assign: named_drbd_node:0 allocation score on cxa1: 76 -pcmk__primitive_assign: named_drbd_node:0 allocation score on cxb1: 0 +pcmk__primitive_assign: named_drbd_node:0 allocation score on cxa1: 276 +pcmk__primitive_assign: named_drbd_node:0 allocation score on cxb1: 210 pcmk__primitive_assign: named_drbd_node:1 allocation score on cxa1: -INFINITY -pcmk__primitive_assign: named_drbd_node:1 allocation score on cxb1: 76 +pcmk__primitive_assign: named_drbd_node:1 allocation score on cxb1: 286 pcmk__primitive_assign: named_filesys allocation score on cxa1: -INFINITY pcmk__primitive_assign: named_filesys allocation score on cxb1: -INFINITY pcmk__primitive_assign: named_syslogd allocation score on cxa1: -INFINITY diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores index 2a85ae6..39399d9 100644 --- a/cts/scheduler/scores/promoted-failed-demote-2.scores +++ b/cts/scheduler/scores/promoted-failed-demote-2.scores @@ -16,14 +16,20 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:1 allocation score on dl380g5a: 0 pcmk__group_assign: group:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on 
dl380g5b: -INFINITY pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores index 2a85ae6..39399d9 100644 --- a/cts/scheduler/scores/promoted-failed-demote.scores +++ b/cts/scheduler/scores/promoted-failed-demote.scores @@ -16,14 +16,20 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:0 allocation score on dl380g5b: 0 pcmk__group_assign: group:1 allocation score on dl380g5a: 0 pcmk__group_assign: group:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0 pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY +pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0 diff --git a/cts/scheduler/scores/promoted-move.scores b/cts/scheduler/scores/promoted-move.scores index 7006cda..5ed2b60 100644 --- a/cts/scheduler/scores/promoted-move.scores +++ b/cts/scheduler/scores/promoted-move.scores @@ -19,7 +19,9 @@ pcmk__primitive_assign: dummy02 allocation score on bl460g1n13: -INFINITY pcmk__primitive_assign: dummy02 allocation score on bl460g1n14: 0 pcmk__primitive_assign: dummy03 allocation score on bl460g1n13: -INFINITY pcmk__primitive_assign: dummy03 allocation score on bl460g1n14: 0 +pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n13: -INFINITY pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n13: INFINITY +pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n14: -INFINITY pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n14: 0 pcmk__primitive_assign: prmDRBD:1 allocation score on bl460g1n13: -INFINITY pcmk__primitive_assign: prmDRBD:1 allocation score on bl460g1n14: INFINITY diff --git a/cts/scheduler/scores/promoted-ordering.scores b/cts/scheduler/scores/promoted-ordering.scores index 46dac66..5162abf 100644 --- a/cts/scheduler/scores/promoted-ordering.scores +++ b/cts/scheduler/scores/promoted-ordering.scores @@ -55,7 +55,7 @@ pcmk__primitive_assign: apache2:0 allocation score on webcluster01: -INFINITY pcmk__primitive_assign: apache2:0 allocation score on webcluster02: -INFINITY pcmk__primitive_assign: apache2:1 allocation score on webcluster01: -INFINITY pcmk__primitive_assign: apache2:1 allocation score on webcluster02: -INFINITY -pcmk__primitive_assign: drbd_mysql:0 allocation 
score on webcluster01: 0 +pcmk__primitive_assign: drbd_mysql:0 allocation score on webcluster01: 100 pcmk__primitive_assign: drbd_mysql:0 allocation score on webcluster02: -INFINITY pcmk__primitive_assign: drbd_mysql:1 allocation score on webcluster01: -INFINITY pcmk__primitive_assign: drbd_mysql:1 allocation score on webcluster02: -INFINITY @@ -71,10 +71,10 @@ pcmk__primitive_assign: fs_mysql allocation score on webcluster01: -INFINITY pcmk__primitive_assign: fs_mysql allocation score on webcluster02: -INFINITY pcmk__primitive_assign: intip_0_main allocation score on webcluster01: -INFINITY pcmk__primitive_assign: intip_0_main allocation score on webcluster02: -INFINITY -pcmk__primitive_assign: intip_1_master allocation score on webcluster01: 200 -pcmk__primitive_assign: intip_1_master allocation score on webcluster02: 0 -pcmk__primitive_assign: intip_2_slave allocation score on webcluster01: 0 -pcmk__primitive_assign: intip_2_slave allocation score on webcluster02: 100 +pcmk__primitive_assign: intip_1_active allocation score on webcluster01: 200 +pcmk__primitive_assign: intip_1_active allocation score on webcluster02: 0 +pcmk__primitive_assign: intip_2_passive allocation score on webcluster01: 0 +pcmk__primitive_assign: intip_2_passive allocation score on webcluster02: 100 pcmk__primitive_assign: mysql-proxy:0 allocation score on webcluster01: -INFINITY pcmk__primitive_assign: mysql-proxy:0 allocation score on webcluster02: -INFINITY pcmk__primitive_assign: mysql-proxy:1 allocation score on webcluster01: -INFINITY diff --git a/cts/scheduler/scores/promoted-partially-demoted-group.scores b/cts/scheduler/scores/promoted-partially-demoted-group.scores index 5205aa5..f266c64 100644 --- a/cts/scheduler/scores/promoted-partially-demoted-group.scores +++ b/cts/scheduler/scores/promoted-partially-demoted-group.scores @@ -60,17 +60,17 @@ pcmk__group_assign: vip-165-fw:0 allocation score on sd01-1: 100 pcmk__group_assign: vip-165-fw:1 allocation score on sd01-0: 100 pcmk__group_assign: vip-165-fw:1 allocation score on sd01-1: 0 pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-0: -INFINITY -pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-1: 10100 -pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-0: 10100 -pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-1: 0 +pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-1: 10500 +pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-0: INFINITY +pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-1: 400 pcmk__primitive_assign: cdev-pool-0-iscsi-lun-1 allocation score on sd01-0: 0 pcmk__primitive_assign: cdev-pool-0-iscsi-lun-1 allocation score on sd01-1: -INFINITY pcmk__primitive_assign: cdev-pool-0-iscsi-target allocation score on sd01-0: INFINITY pcmk__primitive_assign: cdev-pool-0-iscsi-target allocation score on sd01-1: -INFINITY pcmk__primitive_assign: ietd:0 allocation score on sd01-0: -INFINITY pcmk__primitive_assign: ietd:0 allocation score on sd01-1: 100 -pcmk__primitive_assign: ietd:1 allocation score on sd01-0: 100 -pcmk__primitive_assign: ietd:1 allocation score on sd01-1: 0 +pcmk__primitive_assign: ietd:1 allocation score on sd01-0: INFINITY +pcmk__primitive_assign: ietd:1 allocation score on sd01-1: -INFINITY pcmk__primitive_assign: stonith-xvm-sd01-0 allocation score on sd01-0: -INFINITY pcmk__primitive_assign: stonith-xvm-sd01-0 allocation score on sd01-1: 100 pcmk__primitive_assign: stonith-xvm-sd01-1 allocation score on 
sd01-0: 100 diff --git a/cts/scheduler/scores/promoted-probed-score.scores b/cts/scheduler/scores/promoted-probed-score.scores index 1a01a5b..bf6d7fc 100644 --- a/cts/scheduler/scores/promoted-probed-score.scores +++ b/cts/scheduler/scores/promoted-probed-score.scores @@ -1,11 +1,11 @@ -AdminDrbd:0 promotion score on hypatia-corosync.nevis.columbia.edu: 5 -AdminDrbd:1 promotion score on orestes-corosync.nevis.columbia.edu: INFINITY +AdminDrbd:0 promotion score on orestes-corosync.nevis.columbia.edu: INFINITY +AdminDrbd:1 promotion score on hypatia-corosync.nevis.columbia.edu: 5 pcmk__clone_assign: AdminClone allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__clone_assign: AdminClone allocation score on orestes-corosync.nevis.columbia.edu: INFINITY pcmk__clone_assign: AdminDrbd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5 -pcmk__clone_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__clone_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__clone_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 5 +pcmk__clone_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5 pcmk__clone_assign: AdminDrbd:1 allocation score on orestes-corosync.nevis.columbia.edu: 5 pcmk__clone_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__clone_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 @@ -141,8 +141,8 @@ pcmk__clone_assign: Xinetd:1 allocation score on hypatia-corosync.nevis.columbia pcmk__clone_assign: Xinetd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__group_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: ClusterIP:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: ClusterIP:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__group_assign: ClusterIP:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 @@ -207,28 +207,28 @@ pcmk__group_assign: ExportsGroup:1 allocation score on hypatia-corosync.nevis.co pcmk__group_assign: ExportsGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY 
-pcmk__group_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: FilesystemGroup:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: FilesystemGroup:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__group_assign: FilesystemGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__group_assign: FilesystemGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FilesystemGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__group_assign: FilesystemGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: IPGroup:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__group_assign: IPGroup:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__group_assign: IPGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 @@ -272,13 +272,13 @@ pcmk__group_assign: Xinetd:0 allocation score on orestes-corosync.nevis.columbia pcmk__group_assign: Xinetd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY pcmk__group_assign: Xinetd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: AdminDrbd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5 -pcmk__primitive_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: AdminDrbd:1 allocation score on 
orestes-corosync.nevis.columbia.edu: 5 +pcmk__primitive_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5 +pcmk__primitive_assign: AdminDrbd:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5 -pcmk__primitive_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 5 -pcmk__primitive_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: 5 +pcmk__primitive_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5 +pcmk__primitive_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: ClusterIP:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: ClusterIP:0 allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: ClusterIP:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 @@ -333,32 +333,32 @@ pcmk__primitive_assign: ExportWWW:0 allocation score on hypatia-corosync.nevis.c pcmk__primitive_assign: ExportWWW:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: ExportWWW:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: ExportWWW:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 -pcmk__primitive_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY 
-pcmk__primitive_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY +pcmk__primitive_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY +pcmk__primitive_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: KVM-guest allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: KVM-guest allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: Libvirtd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: Libvirtd:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: Libvirtd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: Libvirtd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: Libvirtd:1 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY pcmk__primitive_assign: Proxy allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: Proxy allocation score on orestes-corosync.nevis.columbia.edu: INFINITY pcmk__primitive_assign: StonithHypatia allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY @@ -376,7 +376,7 @@ pcmk__primitive_assign: SymlinkDhcpdLeases allocation score on orestes-corosync. 
pcmk__primitive_assign: SymlinkEtcLibvirt:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: SymlinkEtcLibvirt:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY -pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on orestes-corosync.nevis.columbia.edu: 0 +pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY pcmk__primitive_assign: SymlinkSysconfigDhcpd allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY pcmk__primitive_assign: SymlinkSysconfigDhcpd allocation score on orestes-corosync.nevis.columbia.edu: 0 pcmk__primitive_assign: SymlinkTftp:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0 diff --git a/cts/scheduler/scores/remote-connection-shutdown.scores b/cts/scheduler/scores/remote-connection-shutdown.scores index 176580b..c1d43ec 100644 --- a/cts/scheduler/scores/remote-connection-shutdown.scores +++ b/cts/scheduler/scores/remote-connection-shutdown.scores @@ -2,495 +2,495 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 10 +ovndb_servers:0 promotion score on ovn-dbs-bundle-0: INFINITY ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 -pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: 
-INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0 -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on 
messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on 
database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: 
openstack-cinder-volume allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 
0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score 
on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501 -pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 -pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: 
rabbitmq-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: 
rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY 
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY 
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__bundle_assign: galera-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle 
allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: 
galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-0: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-1: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-2: 0 +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501 +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501 +pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on 
compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 
allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY 
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on 
database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: 
ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501 +pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 +pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 +pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 
allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: 
rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 +pcmk__bundle_assign: redis-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 
+pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on compute-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on compute-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on 
messaging-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-0: 0 pcmk__clone_assign: 
compute-unfence-trigger-clone allocation score on compute-1: 0 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY diff --git a/cts/scheduler/scores/remote-fence-unclean-3.scores b/cts/scheduler/scores/remote-fence-unclean-3.scores index dc157a3..b3bcf0c 100644 --- a/cts/scheduler/scores/remote-fence-unclean-3.scores +++ b/cts/scheduler/scores/remote-fence-unclean-3.scores @@ -2,177 +2,177 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-0: 0 
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on 
overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on 
overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on 
overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: 
galera-bundle-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 
allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on 
overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: 0 
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on 
redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0 pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0 @@ -238,12 +238,12 @@ pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY pcmk__primitive_assign: galera:1 allocation score on galera-bundle-1: INFINITY pcmk__primitive_assign: galera:2 allocation score on galera-bundle-2: INFINITY pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0 -pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0 +pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: INFINITY +pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: INFINITY pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0 +pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY diff --git a/cts/scheduler/scores/route-remote-notify.scores b/cts/scheduler/scores/route-remote-notify.scores index e6fc549..e25fe64 100644 --- a/cts/scheduler/scores/route-remote-notify.scores +++ b/cts/scheduler/scores/route-remote-notify.scores @@ -1,64 +1,64 @@ -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on 
controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: 
rabbitmq-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY 
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY @@ -73,7 +73,7 @@ pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-1 pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY -pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY diff --git a/cts/scheduler/scores/rsc-sets-clone-1.scores b/cts/scheduler/scores/rsc-sets-clone-1.scores index 8552073..4dc2187 100644 --- a/cts/scheduler/scores/rsc-sets-clone-1.scores +++ b/cts/scheduler/scores/rsc-sets-clone-1.scores @@ -82,23 +82,23 @@ pcmk__group_assign: vg2:1 allocation score on sys3: 0 pcmk__primitive_assign: clvmd:0 allocation score on sys2: INFINITY pcmk__primitive_assign: clvmd:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: clvmd:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: clvmd:1 allocation score on sys3: 0 +pcmk__primitive_assign: clvmd:1 allocation score on 
sys3: 10000 pcmk__primitive_assign: controld:0 allocation score on sys2: INFINITY -pcmk__primitive_assign: controld:0 allocation score on sys3: 0 +pcmk__primitive_assign: controld:0 allocation score on sys3: 10000 pcmk__primitive_assign: controld:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: controld:1 allocation score on sys3: 0 +pcmk__primitive_assign: controld:1 allocation score on sys3: 10000 pcmk__primitive_assign: fs2:0 allocation score on sys2: INFINITY pcmk__primitive_assign: fs2:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: fs2:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: fs2:1 allocation score on sys3: 0 +pcmk__primitive_assign: fs2:1 allocation score on sys3: 10000 pcmk__primitive_assign: iscsi1:0 allocation score on sys2: INFINITY pcmk__primitive_assign: iscsi1:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: iscsi1:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: iscsi1:1 allocation score on sys3: 0 +pcmk__primitive_assign: iscsi1:1 allocation score on sys3: 10000 pcmk__primitive_assign: iscsi2:0 allocation score on sys2: INFINITY pcmk__primitive_assign: iscsi2:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: iscsi2:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: iscsi2:1 allocation score on sys3: 0 +pcmk__primitive_assign: iscsi2:1 allocation score on sys3: 10000 pcmk__primitive_assign: nfs1:0 allocation score on sys2: -INFINITY pcmk__primitive_assign: nfs1:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: nfs1:1 allocation score on sys2: -INFINITY @@ -106,7 +106,7 @@ pcmk__primitive_assign: nfs1:1 allocation score on sys3: -INFINITY pcmk__primitive_assign: o2cb:0 allocation score on sys2: INFINITY pcmk__primitive_assign: o2cb:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: o2cb:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: o2cb:1 allocation score on sys3: 0 +pcmk__primitive_assign: o2cb:1 allocation score on sys3: 10000 pcmk__primitive_assign: stonithsys2 allocation score on sys2: -INFINITY pcmk__primitive_assign: stonithsys2 allocation score on sys3: 0 pcmk__primitive_assign: stonithsys3 allocation score on sys2: INFINITY @@ -114,11 +114,11 @@ pcmk__primitive_assign: stonithsys3 allocation score on sys3: -INFINITY pcmk__primitive_assign: vg1:0 allocation score on sys2: INFINITY pcmk__primitive_assign: vg1:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: vg1:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: vg1:1 allocation score on sys3: 0 +pcmk__primitive_assign: vg1:1 allocation score on sys3: 10000 pcmk__primitive_assign: vg2:0 allocation score on sys2: INFINITY pcmk__primitive_assign: vg2:0 allocation score on sys3: -INFINITY pcmk__primitive_assign: vg2:1 allocation score on sys2: -INFINITY -pcmk__primitive_assign: vg2:1 allocation score on sys3: 0 +pcmk__primitive_assign: vg2:1 allocation score on sys3: 10000 pcmk__primitive_assign: vm1 allocation score on sys2: INFINITY pcmk__primitive_assign: vm1 allocation score on sys3: 0 pcmk__primitive_assign: vm2 allocation score on sys2: -INFINITY diff --git a/cts/scheduler/scores/start-then-stop-with-unfence.scores b/cts/scheduler/scores/start-then-stop-with-unfence.scores index 5cc77e5..d8cd4ac 100644 --- a/cts/scheduler/scores/start-then-stop-with-unfence.scores +++ b/cts/scheduler/scores/start-then-stop-with-unfence.scores @@ -9,9 +9,9 @@ pcmk__primitive_assign: ip1 allocation score on rhel7-node1.example.com: 500 pcmk__primitive_assign: ip1 allocation 
score on rhel7-node2.example.com: 0 pcmk__primitive_assign: ip2 allocation score on rhel7-node1.example.com: 0 pcmk__primitive_assign: ip2 allocation score on rhel7-node2.example.com: 500 -pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node1.example.com: 0 -pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node2.example.com: 1 -pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node1.example.com: 0 +pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node1.example.com: 500 +pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node2.example.com: 501 +pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node1.example.com: 500 pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node2.example.com: -INFINITY pcmk__primitive_assign: mpath-node1 allocation score on rhel7-node1.example.com: 0 pcmk__primitive_assign: mpath-node1 allocation score on rhel7-node2.example.com: 0 diff --git a/cts/scheduler/scores/stop-all-resources.scores b/cts/scheduler/scores/stop-all-resources.scores index 119ac99..d471564 100644 --- a/cts/scheduler/scores/stop-all-resources.scores +++ b/cts/scheduler/scores/stop-all-resources.scores @@ -1,32 +1,32 @@ -pcmk__bundle_allocate: httpd-bundle allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0 -pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500 -pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500 -pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500 +pcmk__bundle_assign: httpd-bundle allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle allocation score on cluster02: 0 
+pcmk__bundle_assign: httpd-bundle-0 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0 +pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 500 +pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500 +pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500 pcmk__clone_assign: httpd-bundle-clone allocation score on cluster01: -INFINITY pcmk__clone_assign: httpd-bundle-clone allocation score on cluster02: -INFINITY pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0 diff --git a/cts/scheduler/scores/timeout-by-node.scores b/cts/scheduler/scores/timeout-by-node.scores new file mode 100644 index 0000000..adb96a5 --- /dev/null +++ b/cts/scheduler/scores/timeout-by-node.scores @@ -0,0 +1,61 @@ + +pcmk__clone_assign: rsc1-clone allocation score on node1: 0 +pcmk__clone_assign: rsc1-clone allocation score on node2: 0 +pcmk__clone_assign: rsc1-clone allocation score on node3: 0 +pcmk__clone_assign: rsc1-clone allocation score on node4: 0 +pcmk__clone_assign: rsc1-clone allocation score on node5: 0 +pcmk__clone_assign: rsc1:0 allocation score on node1: 0 +pcmk__clone_assign: rsc1:0 allocation score on node2: 0 +pcmk__clone_assign: rsc1:0 allocation score on node3: 0 +pcmk__clone_assign: rsc1:0 allocation score on node4: 0 +pcmk__clone_assign: rsc1:0 allocation score on node5: 0 +pcmk__clone_assign: rsc1:1 allocation score on node1: 0 +pcmk__clone_assign: rsc1:1 allocation score on node2: 0 +pcmk__clone_assign: rsc1:1 allocation score on node3: 0 +pcmk__clone_assign: rsc1:1 allocation score on node4: 0 +pcmk__clone_assign: rsc1:1 allocation score on node5: 0 +pcmk__clone_assign: rsc1:2 allocation score on node1: 0 +pcmk__clone_assign: rsc1:2 allocation score on node2: 0 +pcmk__clone_assign: rsc1:2 allocation score on node3: 0 
+pcmk__clone_assign: rsc1:2 allocation score on node4: 0 +pcmk__clone_assign: rsc1:2 allocation score on node5: 0 +pcmk__clone_assign: rsc1:3 allocation score on node1: 0 +pcmk__clone_assign: rsc1:3 allocation score on node2: 0 +pcmk__clone_assign: rsc1:3 allocation score on node3: 0 +pcmk__clone_assign: rsc1:3 allocation score on node4: 0 +pcmk__clone_assign: rsc1:3 allocation score on node5: 0 +pcmk__clone_assign: rsc1:4 allocation score on node1: 0 +pcmk__clone_assign: rsc1:4 allocation score on node2: 0 +pcmk__clone_assign: rsc1:4 allocation score on node3: 0 +pcmk__clone_assign: rsc1:4 allocation score on node4: 0 +pcmk__clone_assign: rsc1:4 allocation score on node5: 0 +pcmk__primitive_assign: Fencing allocation score on node1: 0 +pcmk__primitive_assign: Fencing allocation score on node2: 0 +pcmk__primitive_assign: Fencing allocation score on node3: 0 +pcmk__primitive_assign: Fencing allocation score on node4: 0 +pcmk__primitive_assign: Fencing allocation score on node5: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node2: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node4: 0 +pcmk__primitive_assign: rsc1:0 allocation score on node5: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:1 allocation score on node3: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node4: 0 +pcmk__primitive_assign: rsc1:1 allocation score on node5: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc1:2 allocation score on node4: 0 +pcmk__primitive_assign: rsc1:2 allocation score on node5: 0 +pcmk__primitive_assign: rsc1:3 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:3 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:3 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc1:3 allocation score on node4: -INFINITY +pcmk__primitive_assign: rsc1:3 allocation score on node5: 0 +pcmk__primitive_assign: rsc1:4 allocation score on node1: 0 +pcmk__primitive_assign: rsc1:4 allocation score on node2: -INFINITY +pcmk__primitive_assign: rsc1:4 allocation score on node3: -INFINITY +pcmk__primitive_assign: rsc1:4 allocation score on node4: -INFINITY +pcmk__primitive_assign: rsc1:4 allocation score on node5: -INFINITY diff --git a/cts/scheduler/scores/unrunnable-2.scores b/cts/scheduler/scores/unrunnable-2.scores index 52b7ffd..50ecabc 100644 --- a/cts/scheduler/scores/unrunnable-2.scores +++ b/cts/scheduler/scores/unrunnable-2.scores @@ -476,7 +476,7 @@ pcmk__primitive_assign: galera:1 allocation score on overcloud-controller-2: 0 pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-0: -INFINITY pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-1: -INFINITY pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-2: INFINITY -pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-0: 0 +pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-0: INFINITY pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-1: INFINITY pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-2: 0 pcmk__primitive_assign: haproxy:1 
allocation score on overcloud-controller-0: INFINITY diff --git a/cts/scheduler/scores/utilization-complex.scores b/cts/scheduler/scores/utilization-complex.scores index 29bc92c..c37023d 100644 --- a/cts/scheduler/scores/utilization-complex.scores +++ b/cts/scheduler/scores/utilization-complex.scores @@ -1,80 +1,80 @@ -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on httpd-bundle-1: -INFINITY -pcmk__bundle_allocate: httpd-bundle-0 allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-0: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: 
httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-0: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on httpd-bundle-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on httpd-bundle-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-1: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-2: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-3: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-4: 0 -pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-5: 0 -pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 501 -pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500 -pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on httpd-bundle-1: -INFINITY +pcmk__bundle_assign: httpd-bundle-0 allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-0 
allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-0: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on httpd-bundle-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: 
0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on httpd-bundle-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-1: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-2: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-3: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-4: 0 +pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-5: 0 +pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 501 +pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500 +pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500 pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-0: -INFINITY pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-1: -INFINITY pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-2: -INFINITY @@ -312,18 +312,26 @@ pcmk__primitive_assign: clone1:2 allocation score on rhel8-4: 1 pcmk__primitive_assign: clone1:2 allocation score on rhel8-5: 0 pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: 0 pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY 
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1 pcmk__primitive_assign: clone1:4 allocation score on httpd-bundle-0: -INFINITY @@ -384,18 +392,26 @@ pcmk__primitive_assign: clone2:2 allocation score on rhel8-4: 1 pcmk__primitive_assign: clone2:2 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: 0 pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: clone2:4 allocation score on httpd-bundle-0: -INFINITY @@ -535,18 +551,26 @@ pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 
allocation score on rhel8-1: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY +pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: -INFINITY diff --git a/cts/scheduler/scores/utilization-order2.scores b/cts/scheduler/scores/utilization-order2.scores index c4b49d9..4476b60 100644 --- a/cts/scheduler/scores/utilization-order2.scores +++ b/cts/scheduler/scores/utilization-order2.scores @@ -9,6 +9,8 @@ pcmk__primitive_assign: rsc1 allocation score on node1: 0 pcmk__primitive_assign: rsc1 allocation score on node2: 0 pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node1: 1 +pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY pcmk__primitive_assign: rsc2:0 allocation score on node2: 0 pcmk__primitive_assign: rsc2:1 allocation score on node1: 0 diff --git a/cts/scheduler/scores/utilization-order4.scores b/cts/scheduler/scores/utilization-order4.scores index 21eef87..fdc6163 100644 --- a/cts/scheduler/scores/utilization-order4.scores +++ b/cts/scheduler/scores/utilization-order4.scores @@ -47,15 +47,15 @@ pcmk__primitive_assign: degllx63-vm allocation score on deglxen001: -INFINITY pcmk__primitive_assign: degllx63-vm allocation score on deglxen002: -INFINITY pcmk__primitive_assign: degllx64-vm allocation score on deglxen001: -INFINITY pcmk__primitive_assign: degllx64-vm allocation score on deglxen002: -INFINITY -pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen001: 300000 +pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen001: 410000 pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen002: -INFINITY pcmk__primitive_assign: nfs-xen_config:1 allocation score on deglxen001: -INFINITY pcmk__primitive_assign: nfs-xen_config:1 allocation score on deglxen002: -INFINITY -pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen001: 100000 +pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen001: 210000 pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen002: -INFINITY pcmk__primitive_assign: nfs-xen_images:1 allocation score on deglxen001: -INFINITY pcmk__primitive_assign: nfs-xen_images:1 allocation score on deglxen002: -INFINITY -pcmk__primitive_assign: nfs-xen_swapfiles:0 allocation score on deglxen001: 200000 +pcmk__primitive_assign: nfs-xen_swapfiles:0 allocation score on deglxen001: 310000 pcmk__primitive_assign: 
nfs-xen_swapfiles:0 allocation score on deglxen002: -INFINITY pcmk__primitive_assign: nfs-xen_swapfiles:1 allocation score on deglxen001: -INFINITY pcmk__primitive_assign: nfs-xen_swapfiles:1 allocation score on deglxen002: -INFINITY diff --git a/cts/scheduler/scores/utilization-shuffle.scores b/cts/scheduler/scores/utilization-shuffle.scores index 5568dd3..e58269b 100644 --- a/cts/scheduler/scores/utilization-shuffle.scores +++ b/cts/scheduler/scores/utilization-shuffle.scores @@ -210,13 +210,23 @@ pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on act3: -INFINITY pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on sby2: -INFINITY pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: 0 +pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: -INFINITY +pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd1:1 allocation score on act1: INFINITY -pcmk__primitive_assign: prmDiskd1:1 allocation score on act2: 0 -pcmk__primitive_assign: prmDiskd1:1 allocation score on act3: 0 +pcmk__primitive_assign: prmDiskd1:1 allocation score on act2: INFINITY +pcmk__primitive_assign: prmDiskd1:1 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd1:1 allocation score on sby1: 0 pcmk__primitive_assign: prmDiskd1:1 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd1:2 allocation score on act1: -INFINITY @@ -231,17 +241,27 @@ pcmk__primitive_assign: prmDiskd1:3 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmDiskd1:3 allocation score on sby2: INFINITY pcmk__primitive_assign: prmDiskd1:4 allocation score on act1: -INFINITY pcmk__primitive_assign: prmDiskd1:4 allocation score on act2: INFINITY -pcmk__primitive_assign: prmDiskd1:4 allocation score on act3: 0 +pcmk__primitive_assign: prmDiskd1:4 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd1:4 allocation score on sby1: 0 pcmk__primitive_assign: prmDiskd1:4 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act3: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd2:0 
allocation score on act3: INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: 0 +pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: -INFINITY +pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd2:1 allocation score on act1: INFINITY -pcmk__primitive_assign: prmDiskd2:1 allocation score on act2: 0 -pcmk__primitive_assign: prmDiskd2:1 allocation score on act3: 0 +pcmk__primitive_assign: prmDiskd2:1 allocation score on act2: INFINITY +pcmk__primitive_assign: prmDiskd2:1 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd2:1 allocation score on sby1: 0 pcmk__primitive_assign: prmDiskd2:1 allocation score on sby2: 0 pcmk__primitive_assign: prmDiskd2:2 allocation score on act1: -INFINITY @@ -256,7 +276,7 @@ pcmk__primitive_assign: prmDiskd2:3 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmDiskd2:3 allocation score on sby2: INFINITY pcmk__primitive_assign: prmDiskd2:4 allocation score on act1: -INFINITY pcmk__primitive_assign: prmDiskd2:4 allocation score on act2: INFINITY -pcmk__primitive_assign: prmDiskd2:4 allocation score on act3: 0 +pcmk__primitive_assign: prmDiskd2:4 allocation score on act3: -INFINITY pcmk__primitive_assign: prmDiskd2:4 allocation score on sby1: 0 pcmk__primitive_assign: prmDiskd2:4 allocation score on sby2: 0 pcmk__primitive_assign: prmExPostgreSQLDB1 allocation score on act1: 200 @@ -335,13 +355,23 @@ pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on act3: -INFINITY pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on sby2: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act3: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on act3: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on act3: INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on sby1: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on sby1: -INFINITY pcmk__primitive_assign: prmPingd:0 allocation score on sby1: 0 +pcmk__primitive_assign: prmPingd:0 allocation score on sby2: -INFINITY +pcmk__primitive_assign: prmPingd:0 allocation score on sby2: 0 pcmk__primitive_assign: prmPingd:0 allocation score on sby2: 0 pcmk__primitive_assign: prmPingd:1 allocation score on act1: INFINITY -pcmk__primitive_assign: prmPingd:1 allocation score on act2: 0 -pcmk__primitive_assign: prmPingd:1 allocation score on act3: 0 +pcmk__primitive_assign: prmPingd:1 allocation score on act2: INFINITY +pcmk__primitive_assign: prmPingd:1 allocation score on act3: -INFINITY pcmk__primitive_assign: prmPingd:1 allocation score on sby1: 0 pcmk__primitive_assign: prmPingd:1 allocation score on sby2: 0 pcmk__primitive_assign: prmPingd:2 allocation score on act1: -INFINITY @@ -356,6 +386,6 @@ pcmk__primitive_assign: prmPingd:3 allocation 
score on sby1: -INFINITY pcmk__primitive_assign: prmPingd:3 allocation score on sby2: INFINITY pcmk__primitive_assign: prmPingd:4 allocation score on act1: -INFINITY pcmk__primitive_assign: prmPingd:4 allocation score on act2: INFINITY -pcmk__primitive_assign: prmPingd:4 allocation score on act3: 0 +pcmk__primitive_assign: prmPingd:4 allocation score on act3: -INFINITY pcmk__primitive_assign: prmPingd:4 allocation score on sby1: 0 pcmk__primitive_assign: prmPingd:4 allocation score on sby2: 0 diff --git a/cts/scheduler/scores/year-2038.scores b/cts/scheduler/scores/year-2038.scores index 1605ec0..a2fe598 100644 --- a/cts/scheduler/scores/year-2038.scores +++ b/cts/scheduler/scores/year-2038.scores @@ -2,194 +2,194 @@ galera:0 promotion score on galera-bundle-0: 100 galera:1 promotion score on galera-bundle-1: 100 galera:2 promotion score on galera-bundle-2: 100 -pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: 
galera-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 -pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 -pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: 
haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY 
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 -pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 -pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 -pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-0 allocation score on 
overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-1: 0 -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY -pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY -pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 -pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 -pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-0 
allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501 +pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501 +pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on 
controller-0: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0 
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score 
on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 +pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 +pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 +pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY 
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY +pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-1: 0 +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501 +pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501 pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY diff --git a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary index 7bd3b49..7388644 100644 --- a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary +++ b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary @@ -11,7 +11,7 @@ Current cluster status: Transition Summary: * Move myclone:0 ( f20node1 -> f20node2 ) - * Move vm ( f20node1 -> f20node2 ) due to unrunnable myclone-clone stop + * Move vm ( f20node1 -> f20node2 ) due to unmigrateable myclone-clone stop Executing Cluster Transition: * Resource action: myclone monitor on f20node2 diff --git a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary index 2c88bc3..2a755e1 100644 --- a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary +++ b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary @@ -8,7 +8,7 @@ Current cluster status: Transition Summary: * Move A ( 18node1 -> 18node2 ) - * Move B ( 18node2 -> 18node1 ) due to unrunnable A stop + * Move B ( 18node2 -> 18node1 ) due to unmigrateable A stop Executing Cluster Transition: * Resource action: B stop on 18node2 diff --git a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary index 0d0c7ff..92eecaf 100644 --- a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary +++ b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary @@ -11,7 +11,7 @@ Current cluster status: Transition Summary: * Migrate A ( 18node1 -> 18node2 ) * Move B ( 18node1 -> 18node2 ) - * Move C ( 18node1 -> 18node2 ) due to unrunnable B stop + * Move C ( 18node1 -> 18node2 ) due to unmigrateable B stop Executing Cluster Transition: * Pseudo action: 
thegroup_stop_0 diff --git a/cts/scheduler/summary/bundle-interleave-start.summary b/cts/scheduler/summary/bundle-interleave-start.summary index 1648e92..5a59847 100644 --- a/cts/scheduler/summary/bundle-interleave-start.summary +++ b/cts/scheduler/summary/bundle-interleave-start.summary @@ -14,24 +14,24 @@ Current cluster status: * app-bundle-2 (ocf:pacemaker:Stateful): Stopped Transition Summary: - * Start base-bundle-podman-0 ( node2 ) - * Start base-bundle-0 ( node2 ) - * Start base:0 ( base-bundle-0 ) - * Start base-bundle-podman-1 ( node3 ) - * Start base-bundle-1 ( node3 ) - * Start base:1 ( base-bundle-1 ) - * Start base-bundle-podman-2 ( node4 ) - * Start base-bundle-2 ( node4 ) - * Start base:2 ( base-bundle-2 ) - * Start app-bundle-podman-0 ( node2 ) - * Start app-bundle-0 ( node2 ) - * Start app:0 ( app-bundle-0 ) - * Start app-bundle-podman-1 ( node3 ) - * Start app-bundle-1 ( node3 ) - * Start app:1 ( app-bundle-1 ) - * Start app-bundle-podman-2 ( node4 ) - * Start app-bundle-2 ( node4 ) - * Start app:2 ( app-bundle-2 ) + * Start base-bundle-podman-0 ( node2 ) + * Start base-bundle-0 ( node2 ) + * Start base:0 ( base-bundle-0 ) + * Start base-bundle-podman-1 ( node3 ) + * Start base-bundle-1 ( node3 ) + * Start base:1 ( base-bundle-1 ) + * Start base-bundle-podman-2 ( node4 ) + * Start base-bundle-2 ( node4 ) + * Promote base:2 ( Stopped -> Promoted base-bundle-2 ) + * Start app-bundle-podman-0 ( node2 ) + * Start app-bundle-0 ( node2 ) + * Start app:0 ( app-bundle-0 ) + * Start app-bundle-podman-1 ( node3 ) + * Start app-bundle-1 ( node3 ) + * Start app:1 ( app-bundle-1 ) + * Start app-bundle-podman-2 ( node4 ) + * Start app-bundle-2 ( node4 ) + * Promote app:2 ( Stopped -> Promoted app-bundle-2 ) Executing Cluster Transition: * Resource action: base-bundle-podman-0 monitor on node5 @@ -91,17 +91,18 @@ Executing Cluster Transition: * Resource action: base-bundle-podman-2 monitor=60000 on node4 * Resource action: base-bundle-2 start on node4 * Resource action: base:0 start on base-bundle-0 - * Resource action: base:1 start on base-bundle-1 - * Resource action: base:2 start on base-bundle-2 - * Pseudo action: base-bundle-clone_running_0 * Resource action: base-bundle-0 monitor=30000 on node2 * Resource action: base-bundle-1 monitor=30000 on node3 * Resource action: base-bundle-2 monitor=30000 on node4 - * Pseudo action: base-bundle_running_0 + * Resource action: base:1 start on base-bundle-1 * Resource action: base:0 monitor=16000 on base-bundle-0 + * Resource action: base:2 start on base-bundle-2 * Resource action: base:1 monitor=16000 on base-bundle-1 - * Resource action: base:2 monitor=16000 on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Pseudo action: base-bundle_running_0 * Pseudo action: app-bundle_start_0 + * Pseudo action: base-bundle_promote_0 + * Pseudo action: base-bundle-clone_promote_0 * Pseudo action: app-bundle-clone_start_0 * Resource action: app-bundle-podman-0 start on node2 * Resource action: app-bundle-0 monitor on node5 @@ -121,23 +122,32 @@ Executing Cluster Transition: * Resource action: app-bundle-2 monitor on node3 * Resource action: app-bundle-2 monitor on node2 * Resource action: app-bundle-2 monitor on node1 + * Resource action: base:2 promote on base-bundle-2 + * Pseudo action: base-bundle-clone_promoted_0 * Resource action: app-bundle-podman-0 monitor=60000 on node2 * Resource action: app-bundle-0 start on node2 * Resource action: app-bundle-podman-1 monitor=60000 on node3 * Resource action: app-bundle-1 start on node3 * 
Resource action: app-bundle-podman-2 monitor=60000 on node4 * Resource action: app-bundle-2 start on node4 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base:2 monitor=15000 on base-bundle-2 * Resource action: app:0 start on app-bundle-0 - * Resource action: app:1 start on app-bundle-1 - * Resource action: app:2 start on app-bundle-2 - * Pseudo action: app-bundle-clone_running_0 * Resource action: app-bundle-0 monitor=30000 on node2 * Resource action: app-bundle-1 monitor=30000 on node3 * Resource action: app-bundle-2 monitor=30000 on node4 - * Pseudo action: app-bundle_running_0 + * Resource action: app:1 start on app-bundle-1 * Resource action: app:0 monitor=16000 on app-bundle-0 + * Resource action: app:2 start on app-bundle-2 * Resource action: app:1 monitor=16000 on app-bundle-1 - * Resource action: app:2 monitor=16000 on app-bundle-2 + * Pseudo action: app-bundle-clone_running_0 + * Pseudo action: app-bundle_running_0 + * Pseudo action: app-bundle_promote_0 + * Pseudo action: app-bundle-clone_promote_0 + * Resource action: app:2 promote on app-bundle-2 + * Pseudo action: app-bundle-clone_promoted_0 + * Pseudo action: app-bundle_promoted_0 + * Resource action: app:2 monitor=15000 on app-bundle-2 Revised Cluster Status: * Node List: @@ -149,8 +159,8 @@ Revised Cluster Status: * Container bundle set: base-bundle [localhost/pcmktest:base]: * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 * Container bundle set: app-bundle [localhost/pcmktest:app]: * app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2 * app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3 - * app-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4 + * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4 diff --git a/cts/scheduler/summary/bundle-order-fencing.summary b/cts/scheduler/summary/bundle-order-fencing.summary index e3a25c2..4088c15 100644 --- a/cts/scheduler/summary/bundle-order-fencing.summary +++ b/cts/scheduler/summary/bundle-order-fencing.summary @@ -145,6 +145,7 @@ Executing Cluster Transition: * Pseudo action: galera-bundle_stopped_0 * Resource action: rabbitmq notify on rabbitmq-bundle-1 * Resource action: rabbitmq notify on rabbitmq-bundle-2 + * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Pseudo action: galera-bundle-master_running_0 @@ -155,7 +156,6 @@ Executing Cluster Transition: * Pseudo action: redis-bundle-docker-0_stop_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: rabbitmq-bundle_stopped_0 - * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Pseudo action: redis_stop_0 @@ -165,11 +165,11 @@ Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 + * Pseudo action: redis_notified_0 * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Pseudo action: redis-bundle_stopped_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 - * Pseudo action: redis_notified_0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo 
action: redis-bundle-master_start_0 * Pseudo action: rabbitmq-bundle_running_0 diff --git a/cts/scheduler/summary/bundle-order-stop-on-remote.summary b/cts/scheduler/summary/bundle-order-stop-on-remote.summary index 5e2e367..612e701 100644 --- a/cts/scheduler/summary/bundle-order-stop-on-remote.summary +++ b/cts/scheduler/summary/bundle-order-stop-on-remote.summary @@ -140,8 +140,8 @@ Executing Cluster Transition: * Resource action: galera-bundle-docker-2 monitor=60000 on database-2 * Resource action: galera-bundle-2 start on controller-1 * Resource action: redis notify on redis-bundle-0 - * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 + * Resource action: redis notify on redis-bundle-1 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Resource action: galera start on galera-bundle-0 @@ -153,8 +153,8 @@ Executing Cluster Transition: * Pseudo action: redis-bundle_promote_0 * Pseudo action: galera-bundle_running_0 * Resource action: redis notify on redis-bundle-0 - * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 + * Resource action: redis notify on redis-bundle-1 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Pseudo action: galera-bundle_promote_0 @@ -169,8 +169,8 @@ Executing Cluster Transition: * Resource action: galera monitor=10000 on galera-bundle-0 * Resource action: galera monitor=10000 on galera-bundle-2 * Resource action: redis notify on redis-bundle-0 - * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 + * Resource action: redis notify on redis-bundle-1 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Resource action: redis monitor=20000 on redis-bundle-0 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary new file mode 100644 index 0000000..ec6cf2b --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary @@ -0,0 +1,33 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 + +Transition Summary: + * Move vip ( node3 -> node1 ) + +Executing Cluster Transition: + * Resource action: vip stop on node3 + * Resource action: vip start on node1 + * Resource action: vip monitor=10000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary 
b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary new file mode 100644 index 0000000..ec6cf2b --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary @@ -0,0 +1,33 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 + +Transition Summary: + * Move vip ( node3 -> node1 ) + +Executing Cluster Transition: + * Resource action: vip stop on node3 + * Resource action: vip start on node1 + * Resource action: vip monitor=10000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary new file mode 100644 index 0000000..e9db462 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary @@ -0,0 +1,45 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 + +Transition Summary: + * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 ) + * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) + +Executing Cluster Transition: + * Resource action: base cancel=16000 on base-bundle-1 + * Resource action: base cancel=15000 on base-bundle-2 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-2 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_promote_0 + * Resource action: base monitor=16000 on base-bundle-2 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-1 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * vip 
(ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary new file mode 100644 index 0000000..e9db462 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary @@ -0,0 +1,45 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 + +Transition Summary: + * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 ) + * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) + +Executing Cluster Transition: + * Resource action: base cancel=16000 on base-bundle-1 + * Resource action: base cancel=15000 on base-bundle-2 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-2 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_promote_0 + * Resource action: base monitor=16000 on base-bundle-2 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-1 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary new file mode 100644 index 0000000..c35f2e0 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary @@ -0,0 +1,51 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2 + +Transition Summary: + * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 ) + * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) + +Executing Cluster Transition: + * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 + * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 + * Pseudo action: bundle-a_demote_0 + * Pseudo action: bundle-a-clone_demote_0 + 
* Resource action: bundle-a-rsc demote on bundle-a-1 + * Pseudo action: bundle-a-clone_demoted_0 + * Pseudo action: bundle-a_demoted_0 + * Pseudo action: bundle-a_promote_0 + * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 + * Pseudo action: bundle-a-clone_promote_0 + * Resource action: bundle-a-rsc promote on bundle-a-2 + * Pseudo action: bundle-a-clone_promoted_0 + * Pseudo action: bundle-a_promoted_0 + * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2 diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary new file mode 100644 index 0000000..c35f2e0 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary @@ -0,0 +1,51 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2 + +Transition Summary: + * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 ) + * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) + +Executing Cluster Transition: + * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 + * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 + * Pseudo action: bundle-a_demote_0 + * Pseudo action: bundle-a-clone_demote_0 + * Resource action: bundle-a-rsc demote on bundle-a-1 + * Pseudo action: bundle-a-clone_demoted_0 + * Pseudo action: bundle-a_demoted_0 + * Pseudo action: bundle-a_promote_0 + * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 + * Pseudo action: bundle-a-clone_promote_0 + * Resource action: bundle-a-rsc promote on bundle-a-2 + * Pseudo action: bundle-a-clone_promoted_0 + * Pseudo action: bundle-a_promoted_0 + * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * Container bundle set: bundle-b 
[localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-1.summary b/cts/scheduler/summary/bundle-promoted-colocation-1.summary new file mode 100644 index 0000000..61cc974 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-1.summary @@ -0,0 +1,33 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 + +Transition Summary: + * Move vip ( node1 -> node3 ) + +Executing Cluster Transition: + * Resource action: vip stop on node1 + * Resource action: vip start on node3 + * Resource action: vip monitor=10000 on node3 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-2.summary b/cts/scheduler/summary/bundle-promoted-colocation-2.summary new file mode 100644 index 0000000..61cc974 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-2.summary @@ -0,0 +1,33 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 + +Transition Summary: + * Move vip ( node1 -> node3 ) + +Executing Cluster Transition: + * Resource action: vip stop on node1 + * Resource action: vip start on node3 + * Resource action: vip monitor=10000 on node3 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node3 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-3.summary b/cts/scheduler/summary/bundle-promoted-colocation-3.summary new file mode 100644 index 0000000..64b4157 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-3.summary @@ -0,0 +1,45 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ 
base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 + +Transition Summary: + * Promote base:0 ( Unpromoted -> Promoted base-bundle-0 ) + * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) + +Executing Cluster Transition: + * Resource action: base cancel=16000 on base-bundle-0 + * Resource action: base cancel=15000 on base-bundle-2 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-2 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_promote_0 + * Resource action: base monitor=16000 on base-bundle-2 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-0 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-4.summary b/cts/scheduler/summary/bundle-promoted-colocation-4.summary new file mode 100644 index 0000000..64b4157 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-4.summary @@ -0,0 +1,45 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 + +Transition Summary: + * Promote base:0 ( Unpromoted -> Promoted base-bundle-0 ) + * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 ) + +Executing Cluster Transition: + * Resource action: base cancel=16000 on base-bundle-0 + * Resource action: base cancel=15000 on base-bundle-2 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-2 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_promote_0 + * Resource action: base monitor=16000 on base-bundle-2 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-0 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of 
Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3 + * vip (ocf:heartbeat:IPaddr2): Started node1 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-5.summary b/cts/scheduler/summary/bundle-promoted-colocation-5.summary new file mode 100644 index 0000000..dbcf940 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-5.summary @@ -0,0 +1,51 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2 + +Transition Summary: + * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 ) + * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) + +Executing Cluster Transition: + * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 + * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 + * Pseudo action: bundle-a_demote_0 + * Pseudo action: bundle-a-clone_demote_0 + * Resource action: bundle-a-rsc demote on bundle-a-1 + * Pseudo action: bundle-a-clone_demoted_0 + * Pseudo action: bundle-a_demoted_0 + * Pseudo action: bundle-a_promote_0 + * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 + * Pseudo action: bundle-a-clone_promote_0 + * Resource action: bundle-a-rsc promote on bundle-a-2 + * Pseudo action: bundle-a-clone_promoted_0 + * Pseudo action: bundle-a_promoted_0 + * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2 diff --git a/cts/scheduler/summary/bundle-promoted-colocation-6.summary b/cts/scheduler/summary/bundle-promoted-colocation-6.summary new file mode 100644 index 0000000..dbcf940 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-colocation-6.summary @@ -0,0 +1,51 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): 
Promoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2 + +Transition Summary: + * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 ) + * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 ) + +Executing Cluster Transition: + * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2 + * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1 + * Pseudo action: bundle-a_demote_0 + * Pseudo action: bundle-a-clone_demote_0 + * Resource action: bundle-a-rsc demote on bundle-a-1 + * Pseudo action: bundle-a-clone_demoted_0 + * Pseudo action: bundle-a_demoted_0 + * Pseudo action: bundle-a_promote_0 + * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1 + * Pseudo action: bundle-a-clone_promote_0 + * Resource action: bundle-a-rsc promote on bundle-a-2 + * Pseudo action: bundle-a-clone_promoted_0 + * Pseudo action: bundle-a_promoted_0 + * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: bundle-a [localhost/pcmktest]: + * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2 + * Container bundle set: bundle-b [localhost/pcmktest]: + * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1 + * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3 + * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2 diff --git a/cts/scheduler/summary/bundle-promoted-location-1.summary b/cts/scheduler/summary/bundle-promoted-location-1.summary new file mode 100644 index 0000000..4c0a0ab --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-1.summary @@ -0,0 +1,27 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + +Executing Cluster Transition: + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-2.summary b/cts/scheduler/summary/bundle-promoted-location-2.summary new file mode 100644 index 0000000..bd3b3a9 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-2.summary @@ -0,0 +1,54 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing 
(stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + * Stop base-bundle-podman-0 ( node3 ) due to node availability + * Stop base-bundle-0 ( node3 ) due to unrunnable base-bundle-podman-0 start + * Stop base:0 ( Promoted base-bundle-0 ) due to unrunnable base-bundle-podman-0 start + * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 ) + +Executing Cluster Transition: + * Resource action: base cancel=16000 on base-bundle-1 + * Resource action: base cancel=15000 on base-bundle-0 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-0 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_stop_0 + * Pseudo action: base-bundle-clone_stop_0 + * Resource action: base stop on base-bundle-0 + * Pseudo action: base-bundle-clone_stopped_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-0 stop on node3 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-podman-0 stop on node3 + * Pseudo action: base-bundle_stopped_0 + * Pseudo action: base-bundle_running_0 + * Pseudo action: base-bundle_promote_0 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-1 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Stopped + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-3.summary b/cts/scheduler/summary/bundle-promoted-location-3.summary new file mode 100644 index 0000000..4c0a0ab --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-3.summary @@ -0,0 +1,27 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + +Executing Cluster Transition: + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-4.summary b/cts/scheduler/summary/bundle-promoted-location-4.summary new file mode 100644 index 0000000..4c0a0ab --- 
/dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-4.summary @@ -0,0 +1,27 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + +Executing Cluster Transition: + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-5.summary b/cts/scheduler/summary/bundle-promoted-location-5.summary new file mode 100644 index 0000000..4c0a0ab --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-5.summary @@ -0,0 +1,27 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + +Executing Cluster Transition: + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/bundle-promoted-location-6.summary b/cts/scheduler/summary/bundle-promoted-location-6.summary new file mode 100644 index 0000000..5e1cce2 --- /dev/null +++ b/cts/scheduler/summary/bundle-promoted-location-6.summary @@ -0,0 +1,40 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 + +Transition Summary: + * Stop base-bundle-podman-1 ( node2 ) due to node availability + * Stop base-bundle-1 ( node2 ) due to unrunnable base-bundle-podman-1 start + * Stop base:1 ( Unpromoted base-bundle-1 ) due to unrunnable base-bundle-podman-1 start + +Executing Cluster Transition: + * Pseudo action: base-bundle_stop_0 + * Pseudo action: base-bundle-clone_stop_0 + * Resource action: base stop on base-bundle-1 + * Pseudo action: base-bundle-clone_stopped_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: 
base-bundle-1 stop on node2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-podman-1 stop on node2 + * Pseudo action: base-bundle_stopped_0 + * Pseudo action: base-bundle_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Stopped + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary index 7726876..945f3c8 100644 --- a/cts/scheduler/summary/cancel-behind-moving-remote.summary +++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary @@ -58,22 +58,18 @@ Current cluster status: Transition Summary: * Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) * Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) - * Start ovn-dbs-bundle-podman-0 ( controller-2 ) - * Start ovn-dbs-bundle-0 ( controller-2 ) + * Start ovn-dbs-bundle-podman-0 ( controller-0 ) + * Start ovn-dbs-bundle-0 ( controller-0 ) * Start ovndb_servers:0 ( ovn-dbs-bundle-0 ) - * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 ) - * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 ) - * Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start - * Start ip-172.17.1.87 ( controller-0 ) + * Promote ovndb_servers:2 ( Unpromoted -> Promoted ovn-dbs-bundle-2 ) + * Start ip-172.17.1.87 ( controller-1 ) * Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 ) * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 - * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 - * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0 - * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2 + * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-2 + * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 * Cluster action: clear_failcount for nova-evacuate on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 @@ -87,71 +83,53 @@ Executing Cluster Transition: * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 - * Pseudo action: ovn-dbs-bundle_stop_0 + * Pseudo action: ovn-dbs-bundle_start_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 - * Pseudo action: ovn-dbs-bundle-master_stop_0 + * Resource action: ovndb_servers notify on 
ovn-dbs-bundle-1 + * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 + * Pseudo action: ovn-dbs-bundle-master_start_0 + * Resource action: ovn-dbs-bundle-podman-0 start on controller-0 + * Resource action: ovn-dbs-bundle-0 start on controller-0 * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 * Pseudo action: rabbitmq-bundle-clone_running_0 - * Resource action: ovndb_servers stop on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_stopped_0 - * Resource action: ovn-dbs-bundle-1 stop on controller-2 + * Resource action: ovndb_servers start on ovn-dbs-bundle-0 + * Pseudo action: ovn-dbs-bundle-master_running_0 + * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0 + * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0 * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 - * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2 + * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 - * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle_stopped_0 - * Pseudo action: ovn-dbs-bundle_start_0 - * Pseudo action: rabbitmq-bundle_running_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 - * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 - * Pseudo action: ovn-dbs-bundle-master_start_0 - * Resource action: ovn-dbs-bundle-podman-0 start on controller-2 - * Resource action: ovn-dbs-bundle-0 start on controller-2 - * Resource action: ovn-dbs-bundle-podman-1 start on controller-0 - * Resource action: ovn-dbs-bundle-1 start on controller-0 - * Resource action: ovndb_servers start on ovn-dbs-bundle-0 - * Resource action: ovndb_servers start on ovn-dbs-bundle-1 - * Pseudo action: ovn-dbs-bundle-master_running_0 - * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2 - * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2 - * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0 - * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0 - * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 * Pseudo action: ovn-dbs-bundle_running_0 + * Pseudo action: rabbitmq-bundle_running_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle_promote_0 + * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle-master_promote_0 - * Resource action: ip-172.17.1.87 start on controller-0 - * Resource action: ovndb_servers 
promote on ovn-dbs-bundle-1 + * Resource action: ip-172.17.1.87 start on controller-1 + * Resource action: ovndb_servers promote on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_promoted_0 - * Resource action: ip-172.17.1.87 monitor=10000 on controller-0 + * Resource action: ip-172.17.1.87 monitor=10000 on controller-1 * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 + * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: ovn-dbs-bundle_promoted_0 + * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-2 * Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0 - * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1 Using the original execution date of: 2021-02-15 01:40:51Z Revised Cluster Status: @@ -187,10 +165,10 @@ Revised Cluster Status: * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2 - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 - * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Promoted controller-1 + * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-1 * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary new file mode 100644 index 0000000..0b6866e --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary @@ -0,0 +1,29 @@ +Using the original execution date of: 2023-06-21 00:59:59Z +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start dummy:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=10000 on node1 +Using the original execution date of: 2023-06-21 00:59:59Z + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary new file mode 100644 index 0000000..5b0f9b6 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary @@ -0,0 +1,29 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing 
(stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start dummy:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=11000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node1 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary new file mode 100644 index 0000000..e0bdb61 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary @@ -0,0 +1,34 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=11000 on node1 + * Resource action: rsc2 monitor=11000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node1 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary new file mode 100644 index 0000000..6e55a0b --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary @@ -0,0 +1,43 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + * Resource action: base monitor=16000 on base-bundle-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 
base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary new file mode 100644 index 0000000..8b18120 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary @@ -0,0 +1,32 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary new file mode 100644 index 0000000..5702177 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary @@ -0,0 +1,42 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary new file mode 
100644 index 0000000..0b6866e --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary @@ -0,0 +1,29 @@ +Using the original execution date of: 2023-06-21 00:59:59Z +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start dummy:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Resource action: dummy monitor=10000 on node1 +Using the original execution date of: 2023-06-21 00:59:59Z + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary new file mode 100644 index 0000000..8b18120 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary @@ -0,0 +1,32 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node2 node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Start rsc1:2 ( node1 ) + * Start rsc2:2 ( node1 ) + +Executing Cluster Transition: + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp]: + * Started: [ node1 node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary new file mode 100644 index 0000000..5702177 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary @@ -0,0 +1,42 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Start base:2 ( base-bundle-2 ) + +Executing Cluster Transition: + * Pseudo action: base-bundle_start_0 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * 
Pseudo action: base-bundle_running_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Started node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Started node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Started node1 diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary new file mode 100644 index 0000000..7744570 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary @@ -0,0 +1,38 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Demote dummy:1 ( Promoted -> Unpromoted node2 ) + * Promote dummy:2 ( Stopped -> Promoted node1 ) + +Executing Cluster Transition: + * Resource action: dummy cancel=10000 on node2 + * Pseudo action: dummy-clone_demote_0 + * Resource action: dummy demote on node2 + * Pseudo action: dummy-clone_demoted_0 + * Pseudo action: dummy-clone_start_0 + * Resource action: dummy monitor=11000 on node2 + * Resource action: dummy start on node1 + * Pseudo action: dummy-clone_running_0 + * Pseudo action: dummy-clone_promote_0 + * Resource action: dummy promote on node1 + * Pseudo action: dummy-clone_promoted_0 + * Resource action: dummy monitor=10000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: dummy-clone [dummy] (promotable): + * Promoted: [ node1 ] + * Unpromoted: [ node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary new file mode 100644 index 0000000..878f248 --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary @@ -0,0 +1,52 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node2 ] + * Unpromoted: [ node3 ] + * Stopped: [ node1 ] + +Transition Summary: + * Demote rsc1:1 ( Promoted -> Unpromoted node2 ) + * Demote rsc2:1 ( Promoted -> Unpromoted node2 ) + * Promote rsc1:2 ( Stopped -> Promoted node1 ) + * Promote rsc2:2 ( Stopped -> Promoted node1 ) + +Executing Cluster Transition: + * Resource action: rsc1 cancel=10000 on node2 + * Resource action: rsc2 cancel=10000 on node2 + * Pseudo action: grp-clone_demote_0 + * Pseudo action: grp:1_demote_0 + * Resource action: rsc2 demote on node2 + * Resource action: rsc1 demote on node2 + * Resource action: rsc2 monitor=11000 on node2 + * Pseudo action: grp:1_demoted_0 + * Resource action: rsc1 monitor=11000 on node2 + * Pseudo action: grp-clone_demoted_0 + * Pseudo action: grp-clone_start_0 + * Pseudo action: grp:2_start_0 + * Resource action: rsc1 start on node1 + * Resource action: rsc2 start on node1 + * Pseudo action: grp:2_running_0 + * Pseudo action: grp-clone_running_0 + * Pseudo action: grp-clone_promote_0 + * Pseudo action: grp:2_promote_0 + * Resource action: rsc1 promote on node1 + * Resource action: rsc2 
promote on node1 + * Pseudo action: grp:2_promoted_0 + * Resource action: rsc1 monitor=10000 on node1 + * Resource action: rsc2 monitor=10000 on node1 + * Pseudo action: grp-clone_promoted_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Clone Set: grp-clone [grp] (promotable): + * Promoted: [ node1 ] + * Unpromoted: [ node2 node3 ] diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary new file mode 100644 index 0000000..7ede39a --- /dev/null +++ b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary @@ -0,0 +1,56 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Stopped + +Transition Summary: + * Demote base:1 ( Promoted -> Unpromoted base-bundle-1 ) + * Start base-bundle-podman-2 ( node1 ) + * Start base-bundle-2 ( node1 ) + * Promote base:2 ( Stopped -> Promoted base-bundle-2 ) + +Executing Cluster Transition: + * Resource action: base cancel=15000 on base-bundle-1 + * Pseudo action: base-bundle_demote_0 + * Pseudo action: base-bundle-clone_demote_0 + * Resource action: base demote on base-bundle-1 + * Pseudo action: base-bundle-clone_demoted_0 + * Pseudo action: base-bundle_demoted_0 + * Pseudo action: base-bundle_start_0 + * Resource action: base monitor=16000 on base-bundle-1 + * Pseudo action: base-bundle-clone_start_0 + * Resource action: base-bundle-podman-2 start on node1 + * Resource action: base-bundle-2 monitor on node3 + * Resource action: base-bundle-2 monitor on node2 + * Resource action: base-bundle-2 monitor on node1 + * Resource action: base-bundle-podman-2 monitor=60000 on node1 + * Resource action: base-bundle-2 start on node1 + * Resource action: base start on base-bundle-2 + * Pseudo action: base-bundle-clone_running_0 + * Resource action: base-bundle-2 monitor=30000 on node1 + * Pseudo action: base-bundle_running_0 + * Pseudo action: base-bundle_promote_0 + * Pseudo action: base-bundle-clone_promote_0 + * Resource action: base promote on base-bundle-2 + * Pseudo action: base-bundle-clone_promoted_0 + * Pseudo action: base-bundle_promoted_0 + * Resource action: base monitor=15000 on base-bundle-2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node2 + * Container bundle set: base-bundle [localhost/pcmktest]: + * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3 + * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2 + * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node1 diff --git a/cts/scheduler/summary/coloc-with-inner-group-member.summary b/cts/scheduler/summary/coloc-with-inner-group-member.summary new file mode 100644 index 0000000..6659721 --- /dev/null +++ b/cts/scheduler/summary/coloc-with-inner-group-member.summary @@ -0,0 +1,45 @@ +Using the original execution date of: 2023-06-20 20:45:06Z +Current cluster status: + * Node List: + * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] + + * Full List of Resources: + * Fencing 
(stonith:fence_xvm): Started rhel8-1 + * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 + * Resource Group: grp: + * foo (ocf:pacemaker:Dummy): Started rhel8-4 + * bar (ocf:pacemaker:Dummy): Started rhel8-4 + * vip (ocf:pacemaker:Dummy): Started rhel8-3 + +Transition Summary: + * Move foo ( rhel8-4 -> rhel8-3 ) + * Move bar ( rhel8-4 -> rhel8-3 ) + * Restart vip ( rhel8-3 ) due to required bar start + +Executing Cluster Transition: + * Pseudo action: grp_stop_0 + * Resource action: vip stop on rhel8-3 + * Resource action: bar stop on rhel8-4 + * Resource action: foo stop on rhel8-4 + * Pseudo action: grp_stopped_0 + * Pseudo action: grp_start_0 + * Resource action: foo start on rhel8-3 + * Resource action: bar start on rhel8-3 + * Resource action: vip start on rhel8-3 + * Resource action: vip monitor=10000 on rhel8-3 + * Pseudo action: grp_running_0 + * Resource action: foo monitor=10000 on rhel8-3 + * Resource action: bar monitor=10000 on rhel8-3 +Using the original execution date of: 2023-06-20 20:45:06Z + +Revised Cluster Status: + * Node List: + * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started rhel8-1 + * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3 + * Resource Group: grp: + * foo (ocf:pacemaker:Dummy): Started rhel8-3 + * bar (ocf:pacemaker:Dummy): Started rhel8-3 + * vip (ocf:pacemaker:Dummy): Started rhel8-3 diff --git a/cts/scheduler/summary/group-anticolocation-2.summary b/cts/scheduler/summary/group-anticolocation-2.summary new file mode 100644 index 0000000..3ecb056 --- /dev/null +++ b/cts/scheduler/summary/group-anticolocation-2.summary @@ -0,0 +1,41 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 + +Transition Summary: + * Move member2a ( node1 -> node2 ) + * Recover member2b ( node1 -> node2 ) + +Executing Cluster Transition: + * Pseudo action: group2_stop_0 + * Resource action: member2b stop on node1 + * Resource action: member2a stop on node1 + * Pseudo action: group2_stopped_0 + * Pseudo action: group2_start_0 + * Resource action: member2a start on node2 + * Resource action: member2b start on node2 + * Pseudo action: group2_running_0 + * Resource action: member2a monitor=10000 on node2 + * Resource action: member2b monitor=10000 on node2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node2 + * member2b (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/group-anticolocation-3.summary b/cts/scheduler/summary/group-anticolocation-3.summary new file mode 100644 index 0000000..c9d4321 --- /dev/null +++ b/cts/scheduler/summary/group-anticolocation-3.summary @@ -0,0 +1,33 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started 
node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 + +Transition Summary: + * Stop member2b ( node1 ) due to node availability + +Executing Cluster Transition: + * Pseudo action: group2_stop_0 + * Resource action: member2b stop on node1 + * Pseudo action: group2_stopped_0 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/group-anticolocation-4.summary b/cts/scheduler/summary/group-anticolocation-4.summary new file mode 100644 index 0000000..3ecb056 --- /dev/null +++ b/cts/scheduler/summary/group-anticolocation-4.summary @@ -0,0 +1,41 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 + +Transition Summary: + * Move member2a ( node1 -> node2 ) + * Recover member2b ( node1 -> node2 ) + +Executing Cluster Transition: + * Pseudo action: group2_stop_0 + * Resource action: member2b stop on node1 + * Resource action: member2a stop on node1 + * Pseudo action: group2_stopped_0 + * Pseudo action: group2_start_0 + * Resource action: member2a start on node2 + * Resource action: member2b start on node2 + * Pseudo action: group2_running_0 + * Resource action: member2a monitor=10000 on node2 + * Resource action: member2b monitor=10000 on node2 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node2 + * member2b (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/group-anticolocation-5.summary b/cts/scheduler/summary/group-anticolocation-5.summary new file mode 100644 index 0000000..6f83538 --- /dev/null +++ b/cts/scheduler/summary/group-anticolocation-5.summary @@ -0,0 +1,41 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node1 + * member2b (ocf:pacemaker:Dummy): FAILED node1 + +Transition Summary: + * Move member2a ( node1 -> node3 ) + * Recover member2b ( node1 -> node3 ) + +Executing Cluster Transition: + * Pseudo action: group2_stop_0 + * Resource action: member2b stop on node1 + * Resource action: member2a stop on node1 + * Pseudo action: group2_stopped_0 + * Pseudo action: group2_start_0 + * Resource action: member2a start on node3 + * Resource action: member2b start on node3 + * Pseudo action: group2_running_0 + * Resource action: member2a monitor=10000 on node3 + * Resource action: 
member2b monitor=10000 on node3 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Resource Group: group1: + * member1a (ocf:pacemaker:Dummy): Started node2 + * member1b (ocf:pacemaker:Dummy): Started node2 + * Resource Group: group2: + * member2a (ocf:pacemaker:Dummy): Started node3 + * member2b (ocf:pacemaker:Dummy): Started node3 diff --git a/cts/scheduler/summary/group-anticolocation.summary b/cts/scheduler/summary/group-anticolocation.summary index 3ecb056..93d2e73 100644 --- a/cts/scheduler/summary/group-anticolocation.summary +++ b/cts/scheduler/summary/group-anticolocation.summary @@ -12,17 +12,29 @@ Current cluster status: * member2b (ocf:pacemaker:Dummy): FAILED node1 Transition Summary: + * Move member1a ( node2 -> node1 ) + * Move member1b ( node2 -> node1 ) * Move member2a ( node1 -> node2 ) * Recover member2b ( node1 -> node2 ) Executing Cluster Transition: + * Pseudo action: group1_stop_0 + * Resource action: member1b stop on node2 * Pseudo action: group2_stop_0 * Resource action: member2b stop on node1 + * Resource action: member1a stop on node2 * Resource action: member2a stop on node1 + * Pseudo action: group1_stopped_0 + * Pseudo action: group1_start_0 + * Resource action: member1a start on node1 + * Resource action: member1b start on node1 * Pseudo action: group2_stopped_0 * Pseudo action: group2_start_0 * Resource action: member2a start on node2 * Resource action: member2b start on node2 + * Pseudo action: group1_running_0 + * Resource action: member1a monitor=10000 on node1 + * Resource action: member1b monitor=10000 on node1 * Pseudo action: group2_running_0 * Resource action: member2a monitor=10000 on node2 * Resource action: member2b monitor=10000 on node2 @@ -34,8 +46,8 @@ Revised Cluster Status: * Full List of Resources: * Fencing (stonith:fence_xvm): Started node1 * Resource Group: group1: - * member1a (ocf:pacemaker:Dummy): Started node2 - * member1b (ocf:pacemaker:Dummy): Started node2 + * member1a (ocf:pacemaker:Dummy): Started node1 + * member1b (ocf:pacemaker:Dummy): Started node1 * Resource Group: group2: * member2a (ocf:pacemaker:Dummy): Started node2 * member2b (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/migrate-fencing.summary b/cts/scheduler/summary/migrate-fencing.summary index ebc65bd..500c78a 100644 --- a/cts/scheduler/summary/migrate-fencing.summary +++ b/cts/scheduler/summary/migrate-fencing.summary @@ -23,7 +23,7 @@ Current cluster status: * Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: - * Fence (reboot) pcmk-4 'termination was requested' + * Fence (reboot) pcmk-4 'fencing was requested' * Stop FencingChild:0 ( pcmk-4 ) due to node availability * Move r192.168.101.181 ( pcmk-4 -> pcmk-1 ) * Move r192.168.101.182 ( pcmk-4 -> pcmk-1 ) diff --git a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary index c06f8f0..ab8f8ff 100644 --- a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary +++ b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary @@ -37,9 +37,9 @@ Executing Cluster Transition: * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 * Pseudo action: ovn-dbs-bundle_stop_0 - * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on 
ovn-dbs-bundle-2 + * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: ovn-dbs-bundle-master_stop_0 * Resource action: ovndb_servers stop on ovn-dbs-bundle-0 diff --git a/cts/scheduler/summary/node-pending-timeout.summary b/cts/scheduler/summary/node-pending-timeout.summary new file mode 100644 index 0000000..0fef982 --- /dev/null +++ b/cts/scheduler/summary/node-pending-timeout.summary @@ -0,0 +1,26 @@ +Using the original execution date of: 2023-02-21 12:19:57Z +Current cluster status: + * Node List: + * Node node-2: UNCLEAN (online) + * Online: [ node-1 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Stopped + +Transition Summary: + * Fence (reboot) node-2 'peer pending timed out on joining the process group' + * Start st-sbd ( node-1 ) + +Executing Cluster Transition: + * Resource action: st-sbd monitor on node-1 + * Fencing node-2 (reboot) + * Resource action: st-sbd start on node-1 +Using the original execution date of: 2023-02-21 12:19:57Z + +Revised Cluster Status: + * Node List: + * Online: [ node-1 ] + * OFFLINE: [ node-2 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Started node-1 diff --git a/cts/scheduler/summary/pending-node-no-uname.summary b/cts/scheduler/summary/pending-node-no-uname.summary new file mode 100644 index 0000000..5f04fc6 --- /dev/null +++ b/cts/scheduler/summary/pending-node-no-uname.summary @@ -0,0 +1,23 @@ +Using the original execution date of: 2023-02-21 12:19:57Z +Current cluster status: + * Node List: + * Node node-2: pending + * Online: [ node-1 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Stopped + +Transition Summary: + * Start st-sbd ( node-1 ) blocked + +Executing Cluster Transition: + * Resource action: st-sbd monitor on node-1 +Using the original execution date of: 2023-02-21 12:19:57Z + +Revised Cluster Status: + * Node List: + * Node node-2: pending + * Online: [ node-1 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Stopped diff --git a/cts/scheduler/summary/promoted-ordering.summary b/cts/scheduler/summary/promoted-ordering.summary index 3222e18..0ef1bd8 100644 --- a/cts/scheduler/summary/promoted-ordering.summary +++ b/cts/scheduler/summary/promoted-ordering.summary @@ -9,8 +9,8 @@ Current cluster status: * extip_2 (ocf:heartbeat:IPaddr2): Stopped * Resource Group: group_main: * intip_0_main (ocf:heartbeat:IPaddr2): Stopped - * intip_1_master (ocf:heartbeat:IPaddr2): Stopped - * intip_2_slave (ocf:heartbeat:IPaddr2): Stopped + * intip_1_active (ocf:heartbeat:IPaddr2): Stopped + * intip_2_passive (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms_drbd_www [drbd_www] (promotable): * Stopped: [ webcluster01 webcluster02 ] * Clone Set: clone_ocfs2_www [ocfs2_www] (unique): @@ -25,8 +25,8 @@ Current cluster status: Transition Summary: * Start extip_1 ( webcluster01 ) * Start extip_2 ( webcluster01 ) - * Start intip_1_master ( webcluster01 ) - * Start intip_2_slave ( webcluster01 ) + * Start intip_1_active ( webcluster01 ) + * Start intip_2_passive ( webcluster01 ) * Start drbd_www:0 ( webcluster01 ) * Start drbd_mysql:0 ( webcluster01 ) @@ -35,8 +35,8 @@ Executing Cluster Transition: * Resource action: extip_1 monitor on webcluster01 * Resource action: extip_2 monitor on webcluster01 * Resource action: intip_0_main monitor on webcluster01 - * Resource action: intip_1_master monitor on webcluster01 - * Resource action: intip_2_slave monitor on webcluster01 + * Resource action: 
intip_1_active monitor on webcluster01 + * Resource action: intip_2_passive monitor on webcluster01 * Resource action: drbd_www:0 monitor on webcluster01 * Pseudo action: ms_drbd_www_pre_notify_start_0 * Resource action: ocfs2_www:0 monitor on webcluster01 @@ -48,16 +48,16 @@ Executing Cluster Transition: * Resource action: fs_mysql monitor on webcluster01 * Resource action: extip_1 start on webcluster01 * Resource action: extip_2 start on webcluster01 - * Resource action: intip_1_master start on webcluster01 - * Resource action: intip_2_slave start on webcluster01 + * Resource action: intip_1_active start on webcluster01 + * Resource action: intip_2_passive start on webcluster01 * Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_www_start_0 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_mysql_start_0 * Resource action: extip_1 monitor=30000 on webcluster01 * Resource action: extip_2 monitor=30000 on webcluster01 - * Resource action: intip_1_master monitor=30000 on webcluster01 - * Resource action: intip_2_slave monitor=30000 on webcluster01 + * Resource action: intip_1_active monitor=30000 on webcluster01 + * Resource action: intip_2_passive monitor=30000 on webcluster01 * Resource action: drbd_www:0 start on webcluster01 * Pseudo action: ms_drbd_www_running_0 * Resource action: drbd_mysql:0 start on webcluster01 @@ -80,8 +80,8 @@ Revised Cluster Status: * extip_2 (ocf:heartbeat:IPaddr2): Started webcluster01 * Resource Group: group_main: * intip_0_main (ocf:heartbeat:IPaddr2): Stopped - * intip_1_master (ocf:heartbeat:IPaddr2): Started webcluster01 - * intip_2_slave (ocf:heartbeat:IPaddr2): Started webcluster01 + * intip_1_active (ocf:heartbeat:IPaddr2): Started webcluster01 + * intip_2_passive (ocf:heartbeat:IPaddr2): Started webcluster01 * Clone Set: ms_drbd_www [drbd_www] (promotable): * Unpromoted: [ webcluster01 ] * Stopped: [ webcluster02 ] diff --git a/cts/scheduler/summary/promoted-probed-score.summary b/cts/scheduler/summary/promoted-probed-score.summary index 3c9326c..52487d4 100644 --- a/cts/scheduler/summary/promoted-probed-score.summary +++ b/cts/scheduler/summary/promoted-probed-score.summary @@ -39,8 +39,8 @@ Current cluster status: * Proxy (ocf:heartbeat:VirtualDomain): Stopped Transition Summary: - * Promote AdminDrbd:0 ( Stopped -> Promoted hypatia-corosync.nevis.columbia.edu ) - * Promote AdminDrbd:1 ( Stopped -> Promoted orestes-corosync.nevis.columbia.edu ) + * Promote AdminDrbd:0 ( Stopped -> Promoted orestes-corosync.nevis.columbia.edu ) + * Promote AdminDrbd:1 ( Stopped -> Promoted hypatia-corosync.nevis.columbia.edu ) * Start CronAmbientTemperature ( hypatia-corosync.nevis.columbia.edu ) * Start StonithHypatia ( orestes-corosync.nevis.columbia.edu ) * Start StonithOrestes ( hypatia-corosync.nevis.columbia.edu ) @@ -83,18 +83,18 @@ Transition Summary: * Start ExportUsrNevis:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportUsrNevisOffsite:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportWWW:1 ( orestes-corosync.nevis.columbia.edu ) - * Start AdminLvm:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start FSUsrNevis:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start FSVarNevis:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start FSVirtualMachines:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start FSMail:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start FSWork:0 ( hypatia-corosync.nevis.columbia.edu ) - * Start AdminLvm:1 ( orestes-corosync.nevis.columbia.edu ) - * Start FSUsrNevis:1 
( orestes-corosync.nevis.columbia.edu ) - * Start FSVarNevis:1 ( orestes-corosync.nevis.columbia.edu ) - * Start FSVirtualMachines:1 ( orestes-corosync.nevis.columbia.edu ) - * Start FSMail:1 ( orestes-corosync.nevis.columbia.edu ) - * Start FSWork:1 ( orestes-corosync.nevis.columbia.edu ) + * Start AdminLvm:0 ( orestes-corosync.nevis.columbia.edu ) + * Start FSUsrNevis:0 ( orestes-corosync.nevis.columbia.edu ) + * Start FSVarNevis:0 ( orestes-corosync.nevis.columbia.edu ) + * Start FSVirtualMachines:0 ( orestes-corosync.nevis.columbia.edu ) + * Start FSMail:0 ( orestes-corosync.nevis.columbia.edu ) + * Start FSWork:0 ( orestes-corosync.nevis.columbia.edu ) + * Start AdminLvm:1 ( hypatia-corosync.nevis.columbia.edu ) + * Start FSUsrNevis:1 ( hypatia-corosync.nevis.columbia.edu ) + * Start FSVarNevis:1 ( hypatia-corosync.nevis.columbia.edu ) + * Start FSVirtualMachines:1 ( hypatia-corosync.nevis.columbia.edu ) + * Start FSMail:1 ( hypatia-corosync.nevis.columbia.edu ) + * Start FSWork:1 ( hypatia-corosync.nevis.columbia.edu ) * Start KVM-guest ( hypatia-corosync.nevis.columbia.edu ) * Start Proxy ( orestes-corosync.nevis.columbia.edu ) @@ -125,74 +125,74 @@ Executing Cluster Transition: * Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportWWW:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: AdminLvm:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:0 monitor on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminLvm:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:1 monitor on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:1 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: AdminLvm:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: FSMail:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: FSWork:0 monitor on orestes-corosync.nevis.columbia.edu + * Resource action: AdminLvm:1 monitor on hypatia-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:1 monitor on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:1 monitor on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:1 monitor on hypatia-corosync.nevis.columbia.edu + * Resource action: FSMail:1 monitor on hypatia-corosync.nevis.columbia.edu + * Resource action: FSWork:1 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: KVM-guest monitor on orestes-corosync.nevis.columbia.edu * Resource action: KVM-guest monitor on hypatia-corosync.nevis.columbia.edu * Resource 
action: Proxy monitor on orestes-corosync.nevis.columbia.edu * Resource action: Proxy monitor on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_start_0 * Pseudo action: AdminClone_start_0 - * Resource action: AdminDrbd:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 start on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 start on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_running_0 * Pseudo action: AdminClone_post_notify_running_0 - * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_running_0 * Pseudo action: AdminClone_pre_notify_promote_0 - * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_promote_0 * Pseudo action: AdminClone_promote_0 - * Resource action: AdminDrbd:0 promote on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 promote on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 promote on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 promote on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_promoted_0 * Pseudo action: AdminClone_post_notify_promoted_0 - * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_promoted_0 * Pseudo action: FilesystemClone_start_0 - * Resource action: AdminDrbd:0 monitor=59000 on hypatia-corosync.nevis.columbia.edu - * Resource action: AdminDrbd:1 monitor=59000 on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:0 monitor=59000 on orestes-corosync.nevis.columbia.edu + * Resource action: AdminDrbd:1 monitor=59000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_start_0 - * Resource action: AdminLvm:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:0 start on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:0 start on hypatia-corosync.nevis.columbia.edu + * Resource action: AdminLvm:0 start on orestes-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:0 start on orestes-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:0 start on orestes-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:0 start on orestes-corosync.nevis.columbia.edu + * Resource action: FSMail:0 start on orestes-corosync.nevis.columbia.edu + * 
Resource action: FSWork:0 start on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:1_start_0 - * Resource action: AdminLvm:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:1 start on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:1 start on orestes-corosync.nevis.columbia.edu + * Resource action: AdminLvm:1 start on hypatia-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:1 start on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:1 start on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:1 start on hypatia-corosync.nevis.columbia.edu + * Resource action: FSMail:1 start on hypatia-corosync.nevis.columbia.edu + * Resource action: FSWork:1 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_running_0 - * Resource action: AdminLvm:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSMail:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu - * Resource action: FSWork:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu + * Resource action: AdminLvm:0 monitor=30000 on orestes-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:0 monitor=20000 on orestes-corosync.nevis.columbia.edu + * Resource action: FSMail:0 monitor=20000 on orestes-corosync.nevis.columbia.edu + * Resource action: FSWork:0 monitor=20000 on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:1_running_0 - * Resource action: AdminLvm:1 monitor=30000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSUsrNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSVarNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSVirtualMachines:1 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSMail:1 monitor=20000 on orestes-corosync.nevis.columbia.edu - * Resource action: FSWork:1 monitor=20000 on orestes-corosync.nevis.columbia.edu + * Resource action: AdminLvm:1 monitor=30000 on hypatia-corosync.nevis.columbia.edu + * Resource action: FSUsrNevis:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVarNevis:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu + * Resource action: FSVirtualMachines:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu + * Resource action: FSMail:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu + * Resource action: FSWork:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemClone_running_0 * Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu * Pseudo action: DhcpGroup_start_0 diff --git a/cts/scheduler/summary/timeout-by-node.summary b/cts/scheduler/summary/timeout-by-node.summary new file mode 100644 index 0000000..78f4fcd --- /dev/null 
+++ b/cts/scheduler/summary/timeout-by-node.summary @@ -0,0 +1,43 @@ +Current cluster status: + * Node List: + * Online: [ node1 node2 node3 node4 node5 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Clone Set: rsc1-clone [rsc1]: + * Stopped: [ node1 node2 node3 node4 node5 ] + +Transition Summary: + * Start rsc1:0 ( node2 ) + * Start rsc1:1 ( node3 ) + * Start rsc1:2 ( node4 ) + * Start rsc1:3 ( node5 ) + * Start rsc1:4 ( node1 ) + +Executing Cluster Transition: + * Resource action: rsc1:0 monitor on node2 + * Resource action: rsc1:1 monitor on node3 + * Resource action: rsc1:2 monitor on node4 + * Resource action: rsc1:3 monitor on node5 + * Resource action: rsc1:4 monitor on node1 + * Pseudo action: rsc1-clone_start_0 + * Resource action: rsc1:0 start on node2 + * Resource action: rsc1:1 start on node3 + * Resource action: rsc1:2 start on node4 + * Resource action: rsc1:3 start on node5 + * Resource action: rsc1:4 start on node1 + * Pseudo action: rsc1-clone_running_0 + * Resource action: rsc1:0 monitor=10000 on node2 + * Resource action: rsc1:1 monitor=10000 on node3 + * Resource action: rsc1:2 monitor=10000 on node4 + * Resource action: rsc1:3 monitor=10000 on node5 + * Resource action: rsc1:4 monitor=10000 on node1 + +Revised Cluster Status: + * Node List: + * Online: [ node1 node2 node3 node4 node5 ] + + * Full List of Resources: + * Fencing (stonith:fence_xvm): Started node1 + * Clone Set: rsc1-clone [rsc1]: + * Started: [ node1 node2 node3 node4 node5 ] diff --git a/cts/scheduler/summary/unfence-definition.summary b/cts/scheduler/summary/unfence-definition.summary index bb22680..2d94f71 100644 --- a/cts/scheduler/summary/unfence-definition.summary +++ b/cts/scheduler/summary/unfence-definition.summary @@ -32,8 +32,8 @@ Executing Cluster Transition: * Resource action: fencing monitor on virt-3 * Resource action: fencing delete on virt-1 * Resource action: dlm monitor on virt-3 - * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 + * Resource action: clvmd stop on virt-1 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-1 diff --git a/cts/scheduler/summary/unfence-parameters.summary b/cts/scheduler/summary/unfence-parameters.summary index b872a41..93a65e6 100644 --- a/cts/scheduler/summary/unfence-parameters.summary +++ b/cts/scheduler/summary/unfence-parameters.summary @@ -31,8 +31,8 @@ Executing Cluster Transition: * Fencing virt-3 (on) * Resource action: fencing monitor on virt-3 * Resource action: dlm monitor on virt-3 - * Resource action: clvmd stop on virt-1 * Resource action: clvmd monitor on virt-3 + * Resource action: clvmd stop on virt-1 * Pseudo action: clvmd-clone_stopped_0 * Pseudo action: dlm-clone_stop_0 * Resource action: dlm stop on virt-1 diff --git a/cts/scheduler/xml/anon-instance-pending.xml b/cts/scheduler/xml/anon-instance-pending.xml index 86a6728..297c0bb 100644 --- a/cts/scheduler/xml/anon-instance-pending.xml +++ b/cts/scheduler/xml/anon-instance-pending.xml @@ -16,7 +16,7 @@ - + diff --git a/cts/scheduler/xml/bundle-interleave-start.xml b/cts/scheduler/xml/bundle-interleave-start.xml index e8630cd..facb181 100644 --- a/cts/scheduler/xml/bundle-interleave-start.xml +++ b/cts/scheduler/xml/bundle-interleave-start.xml @@ -6,7 +6,8 @@ and its promoted role is colocated with base's. App's starts and promotes are ordered after base's. - In this test, all are stopped and must be started. 
+ In this test, all are stopped and must be started. One replica of each + bundle must be promoted. --> diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml new file mode 100644 index 0000000..71f472e --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml new file mode 100644 index 0000000..32bc5ea --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml new file mode 100644 index 0000000..f954ae1 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml new file mode 100644 index 0000000..8902190 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml new file mode 100644 index 0000000..b960ac5 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml @@ -0,0 +1,368 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml new file mode 100644 index 0000000..6cc80e4 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml @@ -0,0 +1,368 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-1.xml b/cts/scheduler/xml/bundle-promoted-colocation-1.xml new file mode 100644 index 0000000..ff2a520 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-1.xml @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-2.xml b/cts/scheduler/xml/bundle-promoted-colocation-2.xml new file mode 100644 index 0000000..cbef724 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-2.xml @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-3.xml b/cts/scheduler/xml/bundle-promoted-colocation-3.xml new file mode 100644 index 0000000..94d5d1b --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-3.xml @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-4.xml 
b/cts/scheduler/xml/bundle-promoted-colocation-4.xml new file mode 100644 index 0000000..4739472 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-4.xml @@ -0,0 +1,237 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-5.xml b/cts/scheduler/xml/bundle-promoted-colocation-5.xml new file mode 100644 index 0000000..76367d7 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-5.xml @@ -0,0 +1,367 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-colocation-6.xml b/cts/scheduler/xml/bundle-promoted-colocation-6.xml new file mode 100644 index 0000000..a14e7c4 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-colocation-6.xml @@ -0,0 +1,367 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-1.xml b/cts/scheduler/xml/bundle-promoted-location-1.xml new file mode 100644 index 0000000..bba9980 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-1.xml @@ -0,0 +1,221 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-2.xml b/cts/scheduler/xml/bundle-promoted-location-2.xml new file mode 100644 index 0000000..352ea70 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-2.xml @@ -0,0 +1,218 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-3.xml b/cts/scheduler/xml/bundle-promoted-location-3.xml new file mode 100644 index 0000000..0954fd5 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-3.xml @@ -0,0 +1,225 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-4.xml b/cts/scheduler/xml/bundle-promoted-location-4.xml new file mode 100644 index 0000000..8cfbac1 --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-4.xml @@ -0,0 +1,225 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-5.xml b/cts/scheduler/xml/bundle-promoted-location-5.xml new file mode 100644 index 0000000..4cb76fe --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-5.xml @@ -0,0 +1,231 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/bundle-promoted-location-6.xml b/cts/scheduler/xml/bundle-promoted-location-6.xml new file mode 100644 index 0000000..cab69de --- /dev/null +++ b/cts/scheduler/xml/bundle-promoted-location-6.xml @@ -0,0 +1,224 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/cancel-behind-moving-remote.xml b/cts/scheduler/xml/cancel-behind-moving-remote.xml index 67e1430..7b88060 100644 --- a/cts/scheduler/xml/cancel-behind-moving-remote.xml +++ b/cts/scheduler/xml/cancel-behind-moving-remote.xml @@ -1,5 +1,19 @@ + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-1.xml b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml new file mode 100644 index 0000000..a634ff3 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-10.xml b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml new file mode 100644 index 0000000..faa202a --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-11.xml b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml new file mode 100644 index 0000000..43d6d74 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml @@ -0,0 +1,153 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-12.xml b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml new file mode 100644 index 0000000..e302690 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml @@ -0,0 +1,186 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-2.xml b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml new file mode 100644 index 0000000..486666c --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-3.xml b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml new file mode 100644 index 0000000..ddafb74 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml new file mode 100644 index 0000000..f0a5feb --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml @@ -0,0 +1,115 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml new file mode 100644 index 0000000..95e5eca --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml @@ -0,0 +1,143 @@ + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml new file mode 100644 index 0000000..64bb4d9 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml new file mode 100644 index 0000000..e588b81 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-8.xml b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml new file mode 100644 index 0000000..6f882b8 --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml @@ -0,0 +1,153 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-9.xml b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml new file mode 100644 index 0000000..104331d --- /dev/null +++ b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml @@ -0,0 +1,186 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/coloc-with-inner-group-member.xml b/cts/scheduler/xml/coloc-with-inner-group-member.xml new file mode 100644 index 0000000..c07edec --- /dev/null +++ b/cts/scheduler/xml/coloc-with-inner-group-member.xml @@ -0,0 +1,258 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/group-anticolocation-2.xml b/cts/scheduler/xml/group-anticolocation-2.xml new file mode 100644 index 0000000..0fb5523 --- /dev/null +++ b/cts/scheduler/xml/group-anticolocation-2.xml @@ -0,0 +1,166 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/cts/scheduler/xml/group-anticolocation-3.xml b/cts/scheduler/xml/group-anticolocation-3.xml
new file mode 100644
index 0000000..2c118fd
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-3.xml
@@ -0,0 +1,165 @@
[new scheduler test input; XML element content not preserved in this extraction]
diff --git a/cts/scheduler/xml/group-anticolocation-4.xml b/cts/scheduler/xml/group-anticolocation-4.xml
new file mode 100644
index 0000000..33ecb3f
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-4.xml
@@ -0,0 +1,167 @@
[new scheduler test input; XML element content not preserved in this extraction]
diff --git a/cts/scheduler/xml/group-anticolocation-5.xml b/cts/scheduler/xml/group-anticolocation-5.xml
new file mode 100644
index 0000000..b7eb9f3
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-5.xml
@@ -0,0 +1,188 @@
[new scheduler test input; XML element content not preserved in this extraction]
diff --git a/cts/scheduler/xml/group-anticolocation.xml b/cts/scheduler/xml/group-anticolocation.xml
index 1f895ec..1c6c8c9 100644
--- a/cts/scheduler/xml/group-anticolocation.xml
+++ b/cts/scheduler/xml/group-anticolocation.xml
@@ -1,15 +1,17 @@
[changed XML lines not preserved in this extraction]
diff --git a/cts/scheduler/xml/node-pending-timeout.xml b/cts/scheduler/xml/node-pending-timeout.xml
new file mode 100644
index 0000000..b4c3614
--- /dev/null
+++ b/cts/scheduler/xml/node-pending-timeout.xml
@@ -0,0 +1,27 @@
[new scheduler test input; XML element content not preserved in this extraction]
diff --git a/cts/scheduler/xml/pending-node-no-uname.xml b/cts/scheduler/xml/pending-node-no-uname.xml
new file mode 100644
index 0000000..d1b3664
--- /dev/null
+++ b/cts/scheduler/xml/pending-node-no-uname.xml
@@ -0,0 +1,26 @@
[new scheduler test input; XML element content not preserved in this extraction]
diff --git a/cts/scheduler/xml/promoted-ordering.xml b/cts/scheduler/xml/promoted-ordering.xml
index bcf018e..7dd2415 100644
--- a/cts/scheduler/xml/promoted-ordering.xml
+++ b/cts/scheduler/xml/promoted-ordering.xml
@@ -61,7 +61,7 @@
@@ -75,7 +75,7 @@
@@ -85,7 +85,7 @@
@@ -242,24 +242,24 @@
[changed XML lines not preserved in this extraction]
diff --git a/cts/scheduler/xml/promoted-probed-score.xml b/cts/scheduler/xml/promoted-probed-score.xml
index cedc909..bc42aa4 100644
--- a/cts/scheduler/xml/promoted-probed-score.xml
+++ b/cts/scheduler/xml/promoted-probed-score.xml
@@ -623,7 +623,7 @@
@@ -691,7 +691,7 @@
[changed XML lines not preserved in this extraction]
diff --git a/cts/scheduler/xml/timeout-by-node.xml b/cts/scheduler/xml/timeout-by-node.xml
new file mode 100644
index 0000000..221885b
--- /dev/null
+++ b/cts/scheduler/xml/timeout-by-node.xml
@@ -0,0 +1,139 @@
[new scheduler test input; XML element content not preserved in this extraction]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/support/Makefile.am b/cts/support/Makefile.am index 33cfa6f..d591633 100644 --- a/cts/support/Makefile.am +++ b/cts/support/Makefile.am @@ -19,6 +19,6 @@ dist_cts_DATA = cts.conf if BUILD_UPSTART dist_cts_DATA += pacemaker-cts-dummyd.conf endif -cts_SCRIPTS = fence_dummy \ - LSBDummy \ - pacemaker-cts-dummyd +cts_SCRIPTS = fence_dummy \ + LSBDummy \ + pacemaker-cts-dummyd diff --git a/daemons/Makefile.am b/daemons/Makefile.am index 743320b..30dd17e 100644 --- a/daemons/Makefile.am +++ b/daemons/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2018 the Pacemaker project contributors +# Copyright 2018-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -8,4 +8,10 @@ # MAINTAINERCLEANFILES = Makefile.in -SUBDIRS = based schedulerd attrd controld execd fenced pacemakerd +SUBDIRS = based \ + schedulerd \ + attrd \ + controld \ + execd \ + fenced \ + pacemakerd diff --git a/daemons/attrd/Makefile.am b/daemons/attrd/Makefile.am index 6bb81c4..f8d8bc9 100644 --- a/daemons/attrd/Makefile.am +++ b/daemons/attrd/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2022 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -18,31 +18,32 @@ noinst_HEADERS = pacemaker-attrd.h pacemaker_attrd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_attrd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ - $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/lrmd/liblrmd.la \ - $(CLUSTERLIBS) +pacemaker_attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la +pacemaker_attrd_LDADD += $(top_builddir)/lib/cib/libcib.la +pacemaker_attrd_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la +pacemaker_attrd_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la +pacemaker_attrd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_attrd_LDADD += $(CLUSTERLIBS) pacemaker_attrd_SOURCES = attrd_alerts.c \ - attrd_attributes.c \ - attrd_cib.c \ - attrd_corosync.c \ - attrd_elections.c \ - attrd_ipc.c \ - attrd_messages.c \ - attrd_sync.c \ - attrd_utils.c \ - pacemaker-attrd.c - -clean-generic: - rm -f *.log *.debug *.xml *~ - -if BUILD_LEGACY_LINKS + attrd_attributes.c \ + attrd_cib.c \ + attrd_corosync.c \ + attrd_elections.c \ + attrd_ipc.c \ + attrd_messages.c \ + attrd_sync.c \ + attrd_utils.c \ + pacemaker-attrd.c + +.PHONY: install-exec-hook install-exec-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f attrd && $(LN_S) pacemaker-attrd attrd +endif +.PHONY: uninstall-hook uninstall-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f attrd endif diff --git a/daemons/attrd/attrd_alerts.c b/daemons/attrd/attrd_alerts.c index b694891..495e18f 100644 --- a/daemons/attrd/attrd_alerts.c +++ b/daemons/attrd/attrd_alerts.c @@ -1,5 +1,5 @@ /* - * Copyright 2015-2021 the Pacemaker project contributors + * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include "pacemaker-attrd.h" @@ -92,7 +93,7 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void } crmalerts = output; - if (crmalerts && !pcmk__str_eq(crm_element_name(crmalerts), XML_CIB_TAG_ALERTS, pcmk__str_none)) { + if ((crmalerts != NULL) && !pcmk__xe_is(crmalerts, XML_CIB_TAG_ALERTS)) { crmalerts = first_named_child(crmalerts, XML_CIB_TAG_ALERTS); } if (!crmalerts) { @@ -104,9 +105,6 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void attrd_alert_list = pe_unpack_alerts(crmalerts); } -#define XPATH_ALERTS \ - "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_ALERTS - gboolean attrd_read_options(gpointer user_data) { @@ -114,8 +112,9 @@ attrd_read_options(gpointer user_data) CRM_CHECK(the_cib != NULL, return TRUE); - call_id = the_cib->cmds->query(the_cib, XPATH_ALERTS, NULL, - cib_xpath | cib_scope_local); + call_id = the_cib->cmds->query(the_cib, + pcmk__cib_abs_xpath_for(XML_CIB_TAG_ALERTS), + NULL, cib_xpath|cib_scope_local); the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, NULL, "config_query_callback", @@ -125,14 +124,6 @@ attrd_read_options(gpointer user_data) return TRUE; } -void -attrd_cib_updated_cb(const char *event, xmlNode * msg) -{ - if (!attrd_shutting_down() && pcmk__alert_in_patchset(msg, false)) { - mainloop_set_trigger(attrd_config_read); - } -} - int attrd_send_attribute_alert(const char *node, int nodeid, const char *attr, const char *value) diff --git a/daemons/attrd/attrd_attributes.c b/daemons/attrd/attrd_attributes.c index 516ced7..388c181 100644 --- a/daemons/attrd/attrd_attributes.c +++ b/daemons/attrd/attrd_attributes.c @@ -25,25 +25,45 @@ static attribute_t * attrd_create_attribute(xmlNode *xml) { + int is_private = 0; int dampen = 0; - const char *value = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING); - attribute_t *a = calloc(1, sizeof(attribute_t)); + const char *name = crm_element_value(xml, PCMK__XA_ATTR_NAME); + const char *set_type = crm_element_value(xml, PCMK__XA_ATTR_SET_TYPE); + const char *dampen_s = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING); + attribute_t *a = NULL; + + if (set_type == NULL) { + set_type = XML_TAG_ATTR_SETS; + } + + /* Set type is meaningful only when writing to the CIB. Private + * attributes are not written. 
+ */ + crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &is_private); + if ((is_private != 0) + && !pcmk__str_any_of(set_type, XML_TAG_ATTR_SETS, XML_TAG_UTILIZATION, + NULL)) { + crm_warn("Ignoring attribute %s with invalid set type %s", + pcmk__s(name, "(unidentified)"), set_type); + return NULL; + } + a = calloc(1, sizeof(attribute_t)); CRM_ASSERT(a != NULL); - a->id = crm_element_value_copy(xml, PCMK__XA_ATTR_NAME); - a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET); - a->set_type = crm_element_value_copy(xml, PCMK__XA_ATTR_SET_TYPE); - a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID); - a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value); + a->is_private = is_private; + pcmk__str_update(&a->id, name); + pcmk__str_update(&a->set_type, set_type); - crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &a->is_private); + a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET); + a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID); + a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value); a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER); crm_trace("Performing all %s operations as user '%s'", a->id, a->user); - if (value != NULL) { - dampen = crm_get_msec(value); + if (dampen_s != NULL) { + dampen = crm_get_msec(dampen_s); } crm_trace("Created attribute %s with %s write delay", a->id, (a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms)); @@ -52,7 +72,7 @@ attrd_create_attribute(xmlNode *xml) a->timeout_ms = dampen; a->timer = attrd_add_timer(a->id, a->timeout_ms, a); } else if (dampen < 0) { - crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id); + crm_warn("Ignoring invalid delay %s for attribute %s", dampen_s, a->id); } g_hash_table_replace(attributes, a->id, a); @@ -169,6 +189,10 @@ attrd_populate_attribute(xmlNode *xml, const char *attr) if (a == NULL) { if (update_both || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE, pcmk__str_none)) { a = attrd_create_attribute(xml); + if (a == NULL) { + return NULL; + } + } else { crm_warn("Could not update %s: attribute not found", attr); return NULL; diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c index 928c013..80e5580 100644 --- a/daemons/attrd/attrd_cib.c +++ b/daemons/attrd/attrd_cib.c @@ -10,6 +10,7 @@ #include #include +#include // PRIu32 #include #include #include @@ -24,6 +25,188 @@ static int last_cib_op_done = 0; +static void write_attribute(attribute_t *a, bool ignore_delay); + +static void +attrd_cib_destroy_cb(gpointer user_data) +{ + cib_t *cib = user_data; + + cib->cmds->signoff(cib); + + if (attrd_shutting_down(false)) { + crm_info("Disconnected from the CIB manager"); + + } else { + // @TODO This should trigger a reconnect, not a shutdown + crm_crit("Lost connection to the CIB manager, shutting down"); + attrd_exit_status = CRM_EX_DISCONNECT; + attrd_shutdown(0); + } +} + +static void +attrd_cib_updated_cb(const char *event, xmlNode *msg) +{ + const xmlNode *patchset = NULL; + const char *client_name = NULL; + + if (attrd_shutting_down(true)) { + return; + } + + if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) { + return; + } + + if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) { + mainloop_set_trigger(attrd_config_read); + } + + if (!attrd_election_won()) { + // Don't write attributes if we're not the writer + return; + } + + client_name = crm_element_value(msg, F_CIB_CLIENTNAME); + if (!cib__client_triggers_refresh(client_name)) { + // The CIB is still accurate + return; + } + + if 
(cib__element_in_patchset(patchset, XML_CIB_TAG_NODES) + || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) { + + /* An unsafe client modified the nodes or status section. Write + * transient attributes to ensure they're up-to-date in the CIB. + */ + if (client_name == NULL) { + client_name = crm_element_value(msg, F_CIB_CLIENTID); + } + crm_notice("Updating all attributes after %s event triggered by %s", + event, pcmk__s(client_name, "(unidentified client)")); + + attrd_write_attributes(attrd_write_all); + } +} + +int +attrd_cib_connect(int max_retry) +{ + static int attempts = 0; + + int rc = -ENOTCONN; + + the_cib = cib_new(); + if (the_cib == NULL) { + return -ENOTCONN; + } + + do { + if (attempts > 0) { + sleep(attempts); + } + attempts++; + crm_debug("Connection attempt %d to the CIB manager", attempts); + rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command); + + } while ((rc != pcmk_ok) && (attempts < max_retry)); + + if (rc != pcmk_ok) { + crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d", + pcmk_strerror(rc), rc); + goto cleanup; + } + + crm_debug("Connected to the CIB manager after %d attempts", attempts); + + rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb); + if (rc != pcmk_ok) { + crm_err("Could not set disconnection callback"); + goto cleanup; + } + + rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, + attrd_cib_updated_cb); + if (rc != pcmk_ok) { + crm_err("Could not set CIB notification callback"); + goto cleanup; + } + + return pcmk_ok; + +cleanup: + cib__clean_up_connection(&the_cib); + return -ENOTCONN; +} + +void +attrd_cib_disconnect(void) +{ + CRM_CHECK(the_cib != NULL, return); + the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, + attrd_cib_updated_cb); + cib__clean_up_connection(&the_cib); +} + +static void +attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output, + void *user_data) +{ + do_crm_log_unlikely(((rc != pcmk_ok)? LOG_NOTICE : LOG_DEBUG), + "Cleared transient attributes: %s " + CRM_XS " xpath=%s rc=%d", + pcmk_strerror(rc), (char *) user_data, rc); +} + +#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS + +/*! + * \internal + * \brief Wipe all transient attributes for this node from the CIB + * + * Clear any previous transient node attributes from the CIB. This is + * normally done by the DC's controller when this node leaves the cluster, but + * this handles the case where the node restarted so quickly that the + * cluster layer didn't notice. + * + * \todo If pacemaker-attrd respawns after crashing (see PCMK_ENV_RESPAWNED), + * ideally we'd skip this and sync our attributes from the writer. + * However, currently we reject any values for us that the writer has, in + * attrd_peer_update(). + */ +static void +attrd_erase_attrs(void) +{ + int call_id = 0; + char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname); + + crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s", + xpath); + + call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_xpath); + the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath, + "attrd_erase_cb", attrd_erase_cb, + free); +} + +/*! 
+ * \internal + * \brief Prepare the CIB after cluster is connected + */ +void +attrd_cib_init(void) +{ + // We have no attribute values in memory, wipe the CIB to match + attrd_erase_attrs(); + + // Set a trigger for reading the CIB (for the alerts section) + attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL); + + // Always read the CIB at start-up + mainloop_set_trigger(attrd_config_read); +} + static gboolean attribute_timer_cb(gpointer data) { @@ -92,7 +275,7 @@ attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *use /* We deferred a write of a new update because this update was in * progress. Write out the new value without additional delay. */ - attrd_write_attribute(a, false); + write_attribute(a, false); /* We're re-attempting a write because the original failed; delay * the next attempt so we don't potentially flood the CIB manager @@ -121,48 +304,134 @@ attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *use } } -static void -build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value) +/*! + * \internal + * \brief Add a set-attribute update request to the current CIB transaction + * + * \param[in] attr Attribute to update + * \param[in] attr_id ID of attribute to update + * \param[in] node_id ID of node for which to update attribute value + * \param[in] set_id ID of attribute set + * \param[in] value New value for attribute + * + * \return Standard Pacemaker return code + */ +static int +add_set_attr_update(const attribute_t *attr, const char *attr_id, + const char *node_id, const char *set_id, const char *value) { - const char *set = NULL; - xmlNode *xml_obj = NULL; + xmlNode *update = create_xml_node(NULL, XML_CIB_TAG_STATE); + xmlNode *child = update; + int rc = ENOMEM; - xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE); - crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); + if (child == NULL) { + goto done; + } + crm_xml_add(child, XML_ATTR_ID, node_id); - xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS); - crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); + child = create_xml_node(child, XML_TAG_TRANSIENT_NODEATTRS); + if (child == NULL) { + goto done; + } + crm_xml_add(child, XML_ATTR_ID, node_id); - if (pcmk__str_eq(a->set_type, XML_TAG_ATTR_SETS, pcmk__str_null_matches)) { - xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); - } else if (pcmk__str_eq(a->set_type, XML_TAG_UTILIZATION, pcmk__str_none)) { - xml_obj = create_xml_node(xml_obj, XML_TAG_UTILIZATION); - } else { - crm_err("Unknown set type attribute: %s", a->set_type); + child = create_xml_node(child, attr->set_type); + if (child == NULL) { + goto done; } + crm_xml_add(child, XML_ATTR_ID, set_id); - if (a->set_id) { - crm_xml_set_id(xml_obj, "%s", a->set_id); - } else { - crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid); + child = create_xml_node(child, XML_CIB_TAG_NVPAIR); + if (child == NULL) { + goto done; } - set = ID(xml_obj); + crm_xml_add(child, XML_ATTR_ID, attr_id); + crm_xml_add(child, XML_NVPAIR_ATTR_NAME, attr->id); + crm_xml_add(child, XML_NVPAIR_ATTR_VALUE, value); + + rc = the_cib->cmds->modify(the_cib, XML_CIB_TAG_STATUS, update, + cib_can_create|cib_transaction); + rc = pcmk_legacy2rc(rc); + +done: + free_xml(update); + return rc; +} + +/*! 
+ * \internal + * \brief Add an unset-attribute update request to the current CIB transaction + * + * \param[in] attr Attribute to update + * \param[in] attr_id ID of attribute to update + * \param[in] node_id ID of node for which to update attribute value + * \param[in] set_id ID of attribute set + * + * \return Standard Pacemaker return code + */ +static int +add_unset_attr_update(const attribute_t *attr, const char *attr_id, + const char *node_id, const char *set_id) +{ + char *xpath = crm_strdup_printf("/" XML_TAG_CIB + "/" XML_CIB_TAG_STATUS + "/" XML_CIB_TAG_STATE + "[@" XML_ATTR_ID "='%s']" + "/" XML_TAG_TRANSIENT_NODEATTRS + "[@" XML_ATTR_ID "='%s']" + "/%s[@" XML_ATTR_ID "='%s']" + "/" XML_CIB_TAG_NVPAIR + "[@" XML_ATTR_ID "='%s' " + "and @" XML_NVPAIR_ATTR_NAME "='%s']", + node_id, node_id, attr->set_type, set_id, + attr_id, attr->id); + + int rc = the_cib->cmds->remove(the_cib, xpath, NULL, + cib_xpath|cib_transaction); + + free(xpath); + return pcmk_legacy2rc(rc); +} + +/*! + * \internal + * \brief Add an attribute update request to the current CIB transaction + * + * \param[in] attr Attribute to update + * \param[in] value New value for attribute + * \param[in] node_id ID of node for which to update attribute value + * + * \return Standard Pacemaker return code + */ +static int +add_attr_update(const attribute_t *attr, const char *value, const char *node_id) +{ + char *set_id = NULL; + char *attr_id = NULL; + int rc = pcmk_rc_ok; - xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); - if (a->uuid) { - crm_xml_set_id(xml_obj, "%s", a->uuid); + if (attr->set_id != NULL) { + pcmk__str_update(&set_id, attr->set_id); } else { - crm_xml_set_id(xml_obj, "%s-%s", set, a->id); + set_id = crm_strdup_printf("%s-%s", XML_CIB_TAG_STATUS, node_id); } - crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id); + crm_xml_sanitize_id(set_id); - if(value) { - crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value); + if (attr->uuid != NULL) { + pcmk__str_update(&attr_id, attr->uuid); + } else { + attr_id = crm_strdup_printf("%s-%s", set_id, attr->id); + } + crm_xml_sanitize_id(attr_id); + if (value != NULL) { + rc = add_set_attr_update(attr, attr_id, node_id, set_id, value); } else { - crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, ""); - crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE); + rc = add_unset_attr_update(attr, attr_id, node_id, set_id); } + free(set_id); + free(attr_id); + return rc; } static void @@ -202,15 +471,22 @@ attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr) return mainloop_timer_add(id, timeout_ms, FALSE, attribute_timer_cb, attr); } -void -attrd_write_attribute(attribute_t *a, bool ignore_delay) +/*! + * \internal + * \brief Write an attribute's values to the CIB if appropriate + * + * \param[in,out] a Attribute to write + * \param[in] ignore_delay If true, write attribute now regardless of any + * configured delay + */ +static void +write_attribute(attribute_t *a, bool ignore_delay) { int private_updates = 0, cib_updates = 0; - xmlNode *xml_top = NULL; attribute_value_t *v = NULL; GHashTableIter iter; - enum cib_call_options flags = cib_none; GHashTable *alert_attribute_value = NULL; + int rc = pcmk_ok; if (a == NULL) { return; @@ -218,32 +494,37 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) /* If this attribute will be written to the CIB ... 
*/ if (!stand_alone && !a->is_private) { - /* Defer the write if now's not a good time */ - CRM_CHECK(the_cib != NULL, return); if (a->update && (a->update < last_cib_op_done)) { - crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update); + crm_info("Write out of '%s' continuing: update %d considered lost", + a->id, a->update); a->update = 0; // Don't log this message again } else if (a->update) { - crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update); - return; + crm_info("Write out of '%s' delayed: update %d in progress", + a->id, a->update); + goto done; } else if (mainloop_timer_running(a->timer)) { if (ignore_delay) { - /* 'refresh' forces a write of the current value of all attributes - * Cancel any existing timers, we're writing it NOW - */ mainloop_timer_stop(a->timer); - crm_debug("Write out of '%s': timer is running but ignore delay", a->id); + crm_debug("Overriding '%s' write delay", a->id); } else { - crm_info("Write out of '%s' delayed: timer is running", a->id); - return; + crm_info("Delaying write of '%s'", a->id); + goto done; } } - /* Initialize the status update XML */ - xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS); + // Initiate a transaction for all the peer value updates + CRM_CHECK(the_cib != NULL, goto done); + the_cib->cmds->set_user(the_cib, a->user); + rc = the_cib->cmds->init_transaction(the_cib); + if (rc != pcmk_ok) { + crm_err("Failed to write %s (id %s, set %s): Could not initiate " + "CIB transaction", + a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); + goto done; + } } /* Attribute will be written shortly, so clear changed flag */ @@ -256,12 +537,14 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) a->force_write = FALSE; /* Make the table for the attribute trap */ - alert_attribute_value = pcmk__strikey_table(NULL, attrd_free_attribute_value); + alert_attribute_value = pcmk__strikey_table(NULL, + attrd_free_attribute_value); /* Iterate over each peer value of this attribute */ g_hash_table_iter_init(&iter, a->values); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) { - crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY); + while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { + crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, + CRM_GET_PEER_ANY); /* If the value's peer info does not correspond to a peer, ignore it */ if (peer == NULL) { @@ -291,11 +574,20 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) continue; } - /* Add this value to status update XML */ - crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)", + // Update this value as part of the CIB transaction we're building + rc = add_attr_update(a, v->current, peer->uuid); + if (rc != pcmk_rc_ok) { + crm_err("Failed to update %s[%s]=%s (peer known as %s, UUID %s, " + "ID %" PRIu32 "/%" PRIu32 "): %s", + a->id, v->nodename, v->current, peer->uname, peer->uuid, + peer->id, v->nodeid, pcmk_rc_str(rc)); + continue; + } + + crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID " + "%" PRIu32 "/%" PRIu32 ")", a->id, v->nodename, v->current, peer->uname, peer->uuid, peer->id, v->nodeid); - build_update_element(xml_top, a, peer->uuid, v->current); cib_updates++; /* Preservation of the attribute to transmit alert */ @@ -305,12 +597,6 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) v->requested = NULL; if (v->current) { v->requested = strdup(v->current); - } else { - /* Older attrd versions don't know about the 
cib_mixed_update - * flag so make sure it goes to the local cib which does - */ - cib__set_call_options(flags, crm_system_name, - cib_mixed_update|cib_scope_local); } } @@ -319,40 +605,55 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay) private_updates, pcmk__plural_s(private_updates), a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); } - if (cib_updates) { - crm_log_xml_trace(xml_top, __func__); + if (cib_updates > 0) { + char *id = NULL; - a->update = cib_internal_op(the_cib, PCMK__CIB_REQUEST_MODIFY, NULL, - XML_CIB_TAG_STATUS, xml_top, NULL, flags, - a->user); + // Commit transaction + a->update = the_cib->cmds->end_transaction(the_cib, true, cib_none); crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)", a->update, cib_updates, pcmk__plural_s(cib_updates), a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); - the_cib->cmds->register_callback_full(the_cib, a->update, - CIB_OP_TIMEOUT_S, FALSE, - strdup(a->id), - "attrd_cib_callback", - attrd_cib_callback, free); - /* Transmit alert of the attribute */ - send_alert_attributes_value(a, alert_attribute_value); + pcmk__str_update(&id, a->id); + if (the_cib->cmds->register_callback_full(the_cib, a->update, + CIB_OP_TIMEOUT_S, FALSE, id, + "attrd_cib_callback", + attrd_cib_callback, free)) { + // Transmit alert of the attribute + send_alert_attributes_value(a, alert_attribute_value); + } } - g_hash_table_destroy(alert_attribute_value); - free_xml(xml_top); +done: + // Discard transaction (if any) + if (the_cib != NULL) { + the_cib->cmds->end_transaction(the_cib, false, cib_none); + the_cib->cmds->set_user(the_cib, NULL); + } + + if (alert_attribute_value != NULL) { + g_hash_table_destroy(alert_attribute_value); + } } +/*! + * \internal + * \brief Write out attributes + * + * \param[in] options Group of enum attrd_write_options + */ void -attrd_write_attributes(bool all, bool ignore_delay) +attrd_write_attributes(uint32_t options) { GHashTableIter iter; attribute_t *a = NULL; - crm_debug("Writing out %s attributes", all? "all" : "changed"); + crm_debug("Writing out %s attributes", + pcmk_is_set(options, attrd_write_all)? "all" : "changed"); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { - if (!all && a->unknown_peer_uuids) { + if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) { // Try writing this attribute again, in case peer ID was learned a->changed = true; } else if (a->force_write) { @@ -360,9 +661,14 @@ attrd_write_attributes(bool all, bool ignore_delay) a->changed = true; } - if(all || a->changed) { - /* When forced write flag is set, ignore delay. */ - attrd_write_attribute(a, (a->force_write ? 
true : ignore_delay)); + if (pcmk_is_set(options, attrd_write_all) || a->changed) { + bool ignore_delay = pcmk_is_set(options, attrd_write_no_delay); + + if (a->force_write) { + // Always ignore delay when forced write flag is set + ignore_delay = true; + } + write_attribute(a, ignore_delay); } else { crm_trace("Skipping unchanged attribute %s", a->id); } @@ -373,7 +679,7 @@ void attrd_write_or_elect_attribute(attribute_t *a) { if (attrd_election_won()) { - attrd_write_attribute(a, false); + write_attribute(a, false); } else { attrd_start_election_if_needed(); } diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c index ef205e6..86dc67b 100644 --- a/daemons/attrd/attrd_corosync.c +++ b/daemons/attrd/attrd_corosync.c @@ -23,8 +23,6 @@ #include "pacemaker-attrd.h" -extern crm_exit_t attrd_exit_status; - static xmlNode * attrd_confirmation(int callid) { @@ -48,7 +46,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) return; } - if (attrd_shutting_down()) { + if (attrd_shutting_down(false)) { /* If we're shutting down, we want to continue responding to election * ops as long as we're a cluster member (because our vote may be * needed). Ignore all other messages. @@ -133,11 +131,11 @@ attrd_cpg_dispatch(cpg_handle_t handle, static void attrd_cpg_destroy(gpointer unused) { - if (attrd_shutting_down()) { - crm_info("Corosync disconnection complete"); + if (attrd_shutting_down(false)) { + crm_info("Disconnected from Corosync process group"); } else { - crm_crit("Lost connection to cluster layer, shutting down"); + crm_crit("Lost connection to Corosync process group, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } @@ -180,7 +178,7 @@ cache_remote_node(const char *node_name) /* If we previously assumed this node was an unseen cluster node, * remove its entry from the cluster peer cache. 
*/ - crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name); + crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name, NULL); if (dup && (dup->uuid == NULL)) { reap_crm_member(0, node_name); @@ -285,7 +283,7 @@ record_peer_nodeid(attribute_value_t *v, const char *host) crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); if (attrd_election_won()) { - attrd_write_attributes(false, false); + attrd_write_attributes(attrd_write_changed); } } @@ -476,9 +474,7 @@ attrd_peer_clear_failure(pcmk__request_t *request) crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Make sure value is not set, so we delete */ - if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) { - crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL); - } + xml_remove_prop(xml, PCMK__XA_ATTR_VALUE); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { @@ -591,7 +587,8 @@ attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, { bool handle_sync_point = false; - if (xml_has_children(xml)) { + CRM_CHECK((peer != NULL) && (xml != NULL), return); + if (xml->children != NULL) { for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { attrd_copy_xml_attributes(xml, child); diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c index 3b6b55a..82fbe8a 100644 --- a/daemons/attrd/attrd_elections.c +++ b/daemons/attrd/attrd_elections.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2022 the Pacemaker project contributors + * Copyright 2013-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -25,9 +25,11 @@ attrd_election_cb(gpointer user_data) /* Update the peers after an election */ attrd_peer_sync(NULL, NULL); - /* Update the CIB after an election */ - attrd_write_attributes(true, false); - return FALSE; + /* After winning an election, update the CIB with the values of all + * attributes as the winner knows them. 
+ */ + attrd_write_attributes(attrd_write_all); + return G_SOURCE_REMOVE; } void @@ -48,7 +50,7 @@ attrd_start_election_if_needed(void) { if ((peer_writer == NULL) && (election_state(writer) != election_in_progress) - && !attrd_shutting_down()) { + && !attrd_shutting_down(false)) { crm_info("Starting an election to determine the writer"); election_vote(writer); @@ -70,7 +72,7 @@ attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml) crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname); // Don't become writer if we're shutting down - rc = election_count_vote(writer, xml, !attrd_shutting_down()); + rc = election_count_vote(writer, xml, !attrd_shutting_down(false)); switch(rc) { case election_start: diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c index 9d3dfff..05c4a69 100644 --- a/daemons/attrd/attrd_ipc.c +++ b/daemons/attrd/attrd_ipc.c @@ -140,12 +140,8 @@ attrd_client_clear_failure(pcmk__request_t *request) } /* Make sure attribute and value are not set, so we delete via regex */ - if (crm_element_value(xml, PCMK__XA_ATTR_NAME)) { - crm_xml_replace(xml, PCMK__XA_ATTR_NAME, NULL); - } - if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) { - crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL); - } + xml_remove_prop(xml, PCMK__XA_ATTR_NAME); + xml_remove_prop(xml, PCMK__XA_ATTR_VALUE); return attrd_client_update(request); } @@ -166,7 +162,8 @@ attrd_client_peer_remove(pcmk__request_t *request) crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, &nodeid); if (nodeid > 0) { - crm_node_t *node = pcmk__search_cluster_node_cache(nodeid, NULL); + crm_node_t *node = pcmk__search_cluster_node_cache(nodeid, NULL, + NULL); char *host_alloc = NULL; if (node && node->uname) { @@ -235,7 +232,7 @@ attrd_client_refresh(pcmk__request_t *request) crm_info("Updating all attributes"); attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); - attrd_write_attributes(true, true); + attrd_write_attributes(attrd_write_all|attrd_write_no_delay); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; @@ -282,7 +279,7 @@ expand_regexes(xmlNode *xml, const char *attr, const char *value, const char *re * regex and replace it with the name. */ attrd_copy_xml_attributes(xml, child); - crm_xml_replace(child, PCMK__XA_ATTR_PATTERN, NULL); + xml_remove_prop(child, PCMK__XA_ATTR_PATTERN); crm_xml_add(child, PCMK__XA_ATTR_NAME, attr); } } @@ -401,14 +398,18 @@ send_child_update(xmlNode *child, void *data) xmlNode * attrd_client_update(pcmk__request_t *request) { - xmlNode *xml = request->xml; + xmlNode *xml = NULL; const char *attr, *value, *regex; + CRM_CHECK((request != NULL) && (request->xml != NULL), return NULL); + + xml = request->xml; + /* If the message has children, that means it is a message from a newer * client that supports sending multiple operations at a time. There are * two ways we can handle that. */ - if (xml_has_children(xml)) { + if (xml->children != NULL) { if (ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version)) { /* First, if all peers support a certain protocol version, we can * just broadcast the big message and they'll handle it. 
However, @@ -494,7 +495,7 @@ static int32_t attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("New client connection %p", c); - if (attrd_shutting_down()) { + if (attrd_shutting_down(false)) { crm_info("Ignoring new connection from pid %d during shutdown", pcmk__client_pid(c)); return -EPERM; diff --git a/daemons/attrd/attrd_messages.c b/daemons/attrd/attrd_messages.c index 184176a..89da6d8 100644 --- a/daemons/attrd/attrd_messages.c +++ b/daemons/attrd/attrd_messages.c @@ -20,6 +20,36 @@ int minimum_protocol_version = -1; static GHashTable *attrd_handlers = NULL; +static bool +is_sync_point_attr(xmlAttrPtr attr, void *data) +{ + return pcmk__str_eq((const char *) attr->name, PCMK__XA_ATTR_SYNC_POINT, pcmk__str_none); +} + +static int +remove_sync_point_attribute(xmlNode *xml, void *data) +{ + pcmk__xe_remove_matching_attrs(xml, is_sync_point_attr, NULL); + pcmk__xe_foreach_child(xml, XML_ATTR_OP, remove_sync_point_attribute, NULL); + return pcmk_rc_ok; +} + +/* Sync points on a multi-update IPC message to an attrd too old to support + * multi-update messages won't work. Strip the sync point attribute off here + * so we don't pretend to support this situation and instead ACK the client + * immediately. + */ +static void +remove_unsupported_sync_points(pcmk__request_t *request) +{ + if (request->xml->children != NULL && !ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version) && + attrd_request_has_sync_point(request->xml)) { + crm_warn("Ignoring sync point in request from %s because not all nodes support it", + pcmk__request_origin(request)); + remove_sync_point_attribute(request->xml, NULL); + } +} + static xmlNode * handle_unknown_request(pcmk__request_t *request) { @@ -42,6 +72,8 @@ handle_clear_failure_request(pcmk__request_t *request) pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { + remove_unsupported_sync_points(request); + if (attrd_request_has_sync_point(request->xml)) { /* If this client supplied a sync point it wants to wait for, add it to * the wait list. Clients on this list will not receive an ACK until @@ -180,6 +212,8 @@ handle_update_request(pcmk__request_t *request) return NULL; } else { + remove_unsupported_sync_points(request); + if (attrd_request_has_sync_point(request->xml)) { /* If this client supplied a sync point it wants to wait for, add it to * the wait list. Clients on this list will not receive an ACK until diff --git a/daemons/attrd/attrd_sync.c b/daemons/attrd/attrd_sync.c index d59ddd5..1a6c24c 100644 --- a/daemons/attrd/attrd_sync.c +++ b/daemons/attrd/attrd_sync.c @@ -313,7 +313,9 @@ attrd_cluster_sync_point_update(xmlNode *xml) const char * attrd_request_sync_point(xmlNode *xml) { - if (xml_has_children(xml)) { + CRM_CHECK(xml != NULL, return NULL); + + if (xml->children != NULL) { xmlNode *child = pcmk__xe_match(xml, XML_ATTR_OP, PCMK__XA_ATTR_SYNC_POINT, NULL); if (child) { diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c index 7de8dd9..341ee1a 100644 --- a/daemons/attrd/attrd_utils.c +++ b/daemons/attrd/attrd_utils.c @@ -56,26 +56,22 @@ attrd_clear_requesting_shutdown(void) /*! * \internal - * \brief Check whether we're currently requesting shutdown + * \brief Check whether local attribute manager is shutting down * - * \return true if requesting shutdown, false otherwise - */ -bool -attrd_requesting_shutdown(void) -{ - return requesting_shutdown; -} - -/*! 
- * \internal - * \brief Check whether we're currently shutting down + * \param[in] if_requested Also consider presence of "shutdown" attribute * - * \return true if shutting down, false otherwise + * \return \c true if local attribute manager has begun shutdown sequence + * or (if \p if_requested is \c true) whether local node has a nonzero + * "shutdown" attribute set, otherwise \c false + * \note Most callers should pass \c false for \p if_requested, because the + * attribute manager needs to continue performing while the controller is + * shutting down, and even needs to be eligible for election in case all + * nodes are shutting down. */ bool -attrd_shutting_down(void) +attrd_shutting_down(bool if_requested) { - return shutting_down; + return shutting_down || (if_requested && requesting_shutdown); } /*! @@ -137,39 +133,6 @@ attrd_run_mainloop(void) g_main_loop_run(mloop); } -void -attrd_cib_disconnect(void) -{ - CRM_CHECK(the_cib != NULL, return); - the_cib->cmds->del_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb); - the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); - cib__clean_up_connection(&the_cib); -} - -void -attrd_cib_replaced_cb(const char *event, xmlNode * msg) -{ - int change_section = cib_change_section_nodes | cib_change_section_status | cib_change_section_alerts; - - if (attrd_requesting_shutdown() || attrd_shutting_down()) { - return; - } - - crm_element_value_int(msg, F_CIB_CHANGE_SECTION, &change_section); - - if (attrd_election_won()) { - if (change_section & (cib_change_section_nodes | cib_change_section_status)) { - crm_notice("Updating all attributes after %s event", event); - attrd_write_attributes(true, false); - } - } - - if (change_section & cib_change_section_alerts) { - // Check for changes in alerts - mainloop_set_trigger(attrd_config_read); - } -} - /* strlen("value") */ #define plus_plus_len (5) diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index 037825b..8091c5b 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -63,140 +63,6 @@ crm_cluster_t *attrd_cluster = NULL; crm_trigger_t *attrd_config_read = NULL; crm_exit_t attrd_exit_status = CRM_EX_OK; -static void -attrd_cib_destroy_cb(gpointer user_data) -{ - cib_t *conn = user_data; - - conn->cmds->signoff(conn); /* Ensure IPC is cleaned up */ - - if (attrd_shutting_down()) { - crm_info("Connection disconnection complete"); - - } else { - /* eventually this should trigger a reconnect, not a shutdown */ - crm_crit("Lost connection to the CIB manager, shutting down"); - attrd_exit_status = CRM_EX_DISCONNECT; - attrd_shutdown(0); - } - - return; -} - -static void -attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output, - void *user_data) -{ - do_crm_log_unlikely((rc? LOG_NOTICE : LOG_DEBUG), - "Cleared transient attributes: %s " - CRM_XS " xpath=%s rc=%d", - pcmk_strerror(rc), (char *) user_data, rc); -} - -#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS - -/*! - * \internal - * \brief Wipe all transient attributes for this node from the CIB - * - * Clear any previous transient node attributes from the CIB. This is - * normally done by the DC's controller when this node leaves the cluster, but - * this handles the case where the node restarted so quickly that the - * cluster layer didn't notice. - * - * \todo If pacemaker-attrd respawns after crashing (see PCMK_respawned), - * ideally we'd skip this and sync our attributes from the writer. 
- * However, currently we reject any values for us that the writer has, in - * attrd_peer_update(). - */ -static void -attrd_erase_attrs(void) -{ - int call_id; - char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname); - - crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s", - xpath); - - call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_xpath); - the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath, - "attrd_erase_cb", attrd_erase_cb, - free); -} - -static int -attrd_cib_connect(int max_retry) -{ - static int attempts = 0; - - int rc = -ENOTCONN; - - the_cib = cib_new(); - if (the_cib == NULL) { - return -ENOTCONN; - } - - do { - if(attempts > 0) { - sleep(attempts); - } - - attempts++; - crm_debug("Connection attempt %d to the CIB manager", attempts); - rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command); - - } while(rc != pcmk_ok && attempts < max_retry); - - if (rc != pcmk_ok) { - crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d", - pcmk_strerror(rc), rc); - goto cleanup; - } - - crm_debug("Connected to the CIB manager after %d attempts", attempts); - - rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb); - if (rc != pcmk_ok) { - crm_err("Could not set disconnection callback"); - goto cleanup; - } - - rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb); - if(rc != pcmk_ok) { - crm_err("Could not set CIB notification callback"); - goto cleanup; - } - - rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); - if (rc != pcmk_ok) { - crm_err("Could not set CIB notification callback (update)"); - goto cleanup; - } - - return pcmk_ok; - - cleanup: - cib__clean_up_connection(&the_cib); - return -ENOTCONN; -} - -/*! 
- * \internal - * \brief Prepare the CIB after cluster is connected - */ -static void -attrd_cib_init(void) -{ - // We have no attribute values in memory, wipe the CIB to match - attrd_erase_attrs(); - - // Set a trigger for reading the CIB (for the alerts section) - attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL); - - // Always read the CIB at start-up - mainloop_set_trigger(attrd_config_read); -} - static bool ipc_already_running(void) { @@ -208,8 +74,10 @@ ipc_already_running(void) return false; } - rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync); + rc = pcmk__connect_ipc(old_instance, pcmk_ipc_dispatch_sync, 2); if (rc != pcmk_rc_ok) { + crm_debug("No existing %s manager instance found: %s", + pcmk_ipc_name(old_instance, true), pcmk_rc_str(rc)); pcmk_free_ipc_api(old_instance); return false; } @@ -277,7 +145,7 @@ main(int argc, char **argv) attrd_exit_status = CRM_EX_OK; g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "%s", msg); - crm_err(msg); + crm_err("%s", msg); goto done; } diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h index 329fb5a..b8929a7 100644 --- a/daemons/attrd/pacemaker-attrd.h +++ b/daemons/attrd/pacemaker-attrd.h @@ -57,13 +57,14 @@ void attrd_run_mainloop(void); void attrd_set_requesting_shutdown(void); void attrd_clear_requesting_shutdown(void); void attrd_free_waitlist(void); -bool attrd_requesting_shutdown(void); -bool attrd_shutting_down(void); +bool attrd_shutting_down(bool if_requested); void attrd_shutdown(int nsig); void attrd_init_ipc(void); void attrd_ipc_fini(void); +int attrd_cib_connect(int max_retry); void attrd_cib_disconnect(void); +void attrd_cib_init(void); bool attrd_value_needs_expansion(const char *value); int attrd_expand_value(const char *value, const char *old_value); @@ -92,6 +93,7 @@ int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op, guint interval_ms); extern cib_t *the_cib; +extern crm_exit_t attrd_exit_status; /* Alerts */ @@ -100,8 +102,6 @@ extern crm_trigger_t *attrd_config_read; void attrd_lrmd_disconnect(void); gboolean attrd_read_options(gpointer user_data); -void attrd_cib_replaced_cb(const char *event, xmlNode * msg); -void attrd_cib_updated_cb(const char *event, xmlNode *msg); int attrd_send_attribute_alert(const char *node, int nodeid, const char *attr, const char *value); @@ -177,8 +177,13 @@ void attrd_free_attribute(gpointer data); void attrd_free_attribute_value(gpointer data); attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr); -void attrd_write_attribute(attribute_t *a, bool ignore_delay); -void attrd_write_attributes(bool all, bool ignore_delay); +enum attrd_write_options { + attrd_write_changed = 0, + attrd_write_all = (1 << 0), + attrd_write_no_delay = (1 << 1), +}; + +void attrd_write_attributes(uint32_t options); void attrd_write_or_elect_attribute(attribute_t *a); extern int minimum_protocol_version; diff --git a/daemons/based/Makefile.am b/daemons/based/Makefile.am index 053d93c..022fc47 100644 --- a/daemons/based/Makefile.am +++ b/daemons/based/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2021 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -13,35 +13,37 @@ EXTRA_DIST = cib.pam halibdir = $(CRM_DAEMON_DIR) -COMMONLIBS = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/cib/libcib.la - halib_PROGRAMS = pacemaker-based -noinst_HEADERS = pacemaker-based.h +noinst_HEADERS = based_transaction.h \ + pacemaker-based.h pacemaker_based_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_based_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_based_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ - $(COMMONLIBS) $(CLUSTERLIBS) - -pacemaker_based_SOURCES = pacemaker-based.c \ - based_callbacks.c \ - based_common.c \ - based_io.c \ - based_messages.c \ - based_notify.c \ - based_remote.c - -clean-generic: - rm -f *.log *.debug *.xml *~ - -if BUILD_LEGACY_LINKS +pacemaker_based_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la +pacemaker_based_LDADD += $(top_builddir)/lib/cib/libcib.la +pacemaker_based_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_based_LDADD += $(CLUSTERLIBS) + +pacemaker_based_SOURCES = pacemaker-based.c \ + based_callbacks.c \ + based_io.c \ + based_messages.c \ + based_notify.c \ + based_operation.c \ + based_remote.c \ + based_transaction.c + +.PHONY: install-exec-hook install-exec-hook: +if BUILD_LEGACY_LINKS $(MKDIR_P) -- $(DESTDIR)$(CRM_DAEMON_DIR) cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f cib && $(LN_S) pacemaker-based cib +endif +.PHONY: uninstall-hook uninstall-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f cib endif diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c index 3726caa..4fac222 100644 --- a/daemons/based/based_callbacks.c +++ b/daemons/based/based_callbacks.c @@ -20,6 +20,9 @@ #include #include // PRIu64 +#include +#include + #include #include #include @@ -31,7 +34,6 @@ #include #define EXIT_ESCALATION_MS 10000 -#define OUR_NODENAME (stand_alone? "localhost" : crm_cluster->uname) static unsigned long cib_local_bcast_num = 0; @@ -50,11 +52,10 @@ qb_ipcs_service_t *ipcs_ro = NULL; qb_ipcs_service_t *ipcs_rw = NULL; qb_ipcs_service_t *ipcs_shm = NULL; -static void cib_process_request(xmlNode *request, gboolean privileged, - const pcmk__client_t *cib_client); - -static int cib_process_command(xmlNode *request, xmlNode **reply, - xmlNode **cib_diff, gboolean privileged); +static int cib_process_command(xmlNode *request, + const cib__operation_t *operation, + cib__op_fn_t op_function, xmlNode **reply, + xmlNode **cib_diff, bool privileged); static gboolean cib_common_callback(qb_ipcs_connection_t *c, void *data, size_t size, gboolean privileged); @@ -138,11 +139,130 @@ struct qb_ipcs_service_handlers ipc_rw_callbacks = { .connection_destroyed = cib_ipc_destroy }; +/*! + * \internal + * \brief Create reply XML for a CIB request + * + * \param[in] op CIB operation type + * \param[in] call_id CIB call ID + * \param[in] client_id CIB client ID + * \param[in] call_options Group of enum cib_call_options flags + * \param[in] rc Request return code + * \param[in] call_data Request output data + * + * \return Reply XML + * + * \note The caller is responsible for freeing the return value using + * \p free_xml(). 
+ */ +static xmlNode * +create_cib_reply(const char *op, const char *call_id, const char *client_id, + int call_options, int rc, xmlNode *call_data) +{ + xmlNode *reply = create_xml_node(NULL, "cib-reply"); + + CRM_ASSERT(reply != NULL); + + crm_xml_add(reply, F_TYPE, T_CIB); + crm_xml_add(reply, F_CIB_OPERATION, op); + crm_xml_add(reply, F_CIB_CALLID, call_id); + crm_xml_add(reply, F_CIB_CLIENTID, client_id); + crm_xml_add_int(reply, F_CIB_CALLOPTS, call_options); + crm_xml_add_int(reply, F_CIB_RC, rc); + + if (call_data != NULL) { + crm_trace("Attaching reply output"); + add_message_xml(reply, F_CIB_CALLDATA, call_data); + } + + crm_log_xml_explicit(reply, "cib:reply"); + return reply; +} + +static void +do_local_notify(const xmlNode *notify_src, const char *client_id, + bool sync_reply, bool from_peer) +{ + int rid = 0; + int call_id = 0; + pcmk__client_t *client_obj = NULL; + + CRM_ASSERT(notify_src && client_id); + + crm_element_value_int(notify_src, F_CIB_CALLID, &call_id); + + client_obj = pcmk__find_client_by_id(client_id); + if (client_obj == NULL) { + crm_debug("Could not send response %d: client %s not found", + call_id, client_id); + return; + } + + if (sync_reply) { + if (client_obj->ipcs) { + CRM_LOG_ASSERT(client_obj->request_id); + + rid = client_obj->request_id; + client_obj->request_id = 0; + + crm_trace("Sending response %d to client %s%s", + rid, pcmk__client_name(client_obj), + (from_peer? " (originator of delegated request)" : "")); + } else { + crm_trace("Sending response (call %d) to client %s%s", + call_id, pcmk__client_name(client_obj), + (from_peer? " (originator of delegated request)" : "")); + } + + } else { + crm_trace("Sending event %d to client %s%s", + call_id, pcmk__client_name(client_obj), + (from_peer? " (originator of delegated request)" : "")); + } + + switch (PCMK__CLIENT_TYPE(client_obj)) { + case pcmk__client_ipc: + { + int rc = pcmk__ipc_send_xml(client_obj, rid, notify_src, + (sync_reply? crm_ipc_flags_none + : crm_ipc_server_event)); + + if (rc != pcmk_rc_ok) { + crm_warn("%s reply to client %s failed: %s " CRM_XS " rc=%d", + (sync_reply? 
"Synchronous" : "Asynchronous"), + pcmk__client_name(client_obj), pcmk_rc_str(rc), + rc); + } + } + break; +#ifdef HAVE_GNUTLS_GNUTLS_H + case pcmk__client_tls: +#endif + case pcmk__client_tcp: + pcmk__remote_send_xml(client_obj->remote, notify_src); + break; + default: + crm_err("Unknown transport for client %s " + CRM_XS " flags=%#016" PRIx64, + pcmk__client_name(client_obj), client_obj->flags); + } +} + void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, pcmk__client_t *cib_client, gboolean privileged) { const char *op = crm_element_value(op_request, F_CIB_OPERATION); + int call_options = cib_none; + + crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options); + + /* Requests with cib_transaction set should not be sent to based directly + * (outside of a commit-transaction request) + */ + if (pcmk_is_set(call_options, cib_transaction)) { + return; + } if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { if (flags & crm_ipc_client_response) { @@ -180,9 +300,6 @@ cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, } else if (pcmk__str_eq(type, T_CIB_DIFF_NOTIFY, pcmk__str_casei)) { bit = cib_notify_diff; - } else if (pcmk__str_eq(type, T_CIB_REPLACE_NOTIFY, pcmk__str_casei)) { - bit = cib_notify_replace; - } else { status = CRM_EX_INVALID_PARAM; } @@ -354,9 +471,7 @@ process_ping_reply(xmlNode *reply) if(remote_cib && remote_cib->children) { // Additional debug xml_calculate_changes(the_cib, remote_cib); - - pcmk__output_set_log_level(logger_out, LOG_INFO); - pcmk__xml_show_changes(logger_out, remote_cib); + pcmk__log_xml_changes(LOG_INFO, remote_cib); crm_trace("End of differences"); } @@ -366,75 +481,6 @@ process_ping_reply(xmlNode *reply) } } -static void -do_local_notify(xmlNode * notify_src, const char *client_id, - gboolean sync_reply, gboolean from_peer) -{ - int rid = 0; - int call_id = 0; - pcmk__client_t *client_obj = NULL; - - CRM_ASSERT(notify_src && client_id); - - crm_element_value_int(notify_src, F_CIB_CALLID, &call_id); - - client_obj = pcmk__find_client_by_id(client_id); - if (client_obj == NULL) { - crm_debug("Could not send response %d: client %s not found", - call_id, client_id); - return; - } - - if (sync_reply) { - if (client_obj->ipcs) { - CRM_LOG_ASSERT(client_obj->request_id); - - rid = client_obj->request_id; - client_obj->request_id = 0; - - crm_trace("Sending response %d to client %s%s", - rid, pcmk__client_name(client_obj), - (from_peer? " (originator of delegated request)" : "")); - } else { - crm_trace("Sending response (call %d) to client %s%s", - call_id, pcmk__client_name(client_obj), - (from_peer? " (originator of delegated request)" : "")); - } - - } else { - crm_trace("Sending event %d to client %s%s", - call_id, pcmk__client_name(client_obj), - (from_peer? " (originator of delegated request)" : "")); - } - - switch (PCMK__CLIENT_TYPE(client_obj)) { - case pcmk__client_ipc: - { - int rc = pcmk__ipc_send_xml(client_obj, rid, notify_src, - (sync_reply? crm_ipc_flags_none - : crm_ipc_server_event)); - - if (rc != pcmk_rc_ok) { - crm_warn("%s reply to client %s failed: %s " CRM_XS " rc=%d", - (sync_reply? 
"Synchronous" : "Asynchronous"), - pcmk__client_name(client_obj), pcmk_rc_str(rc), - rc); - } - } - break; -#ifdef HAVE_GNUTLS_GNUTLS_H - case pcmk__client_tls: -#endif - case pcmk__client_tcp: - pcmk__remote_send_xml(client_obj->remote, notify_src); - break; - default: - crm_err("Unknown transport for client %s " - CRM_XS " flags=%#016" PRIx64, - pcmk__client_name(client_obj), client_obj->flags); - } -} - static void local_notify_destroy_callback(gpointer data) { @@ -448,7 +494,7 @@ local_notify_destroy_callback(gpointer data) static void check_local_notify(int bcast_id) { - cib_local_notify_t *notify = NULL; + const cib_local_notify_t *notify = NULL; if (!local_notify_queue) { return; @@ -483,13 +529,14 @@ queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_re } static void -parse_local_options_v1(const pcmk__client_t *cib_client, int call_type, - int call_options, const char *host, const char *op, - gboolean *local_notify, gboolean *needs_reply, - gboolean *process, gboolean *needs_forward) +parse_local_options_v1(const pcmk__client_t *cib_client, + const cib__operation_t *operation, int call_options, + const char *host, const char *op, gboolean *local_notify, + gboolean *needs_reply, gboolean *process, + gboolean *needs_forward) { - if (cib_op_modifies(call_type) - && !(call_options & cib_inhibit_bcast)) { + if (pcmk_is_set(operation->flags, cib__op_attr_modifies) + && !pcmk_is_set(call_options, cib_inhibit_bcast)) { /* we need to send an update anyway */ *needs_reply = TRUE; } else { @@ -526,78 +573,87 @@ parse_local_options_v1(const pcmk__client_t *cib_client, int call_type, } static void -parse_local_options_v2(const pcmk__client_t *cib_client, int call_type, - int call_options, const char *host, const char *op, - gboolean *local_notify, gboolean *needs_reply, - gboolean *process, gboolean *needs_forward) +parse_local_options_v2(const pcmk__client_t *cib_client, + const cib__operation_t *operation, int call_options, + const char *host, const char *op, gboolean *local_notify, + gboolean *needs_reply, gboolean *process, + gboolean *needs_forward) { - if (cib_op_modifies(call_type)) { - if (pcmk__str_any_of(op, PCMK__CIB_REQUEST_PRIMARY, - PCMK__CIB_REQUEST_SECONDARY, NULL)) { - /* Always handle these locally */ - *process = TRUE; - *needs_reply = FALSE; - *local_notify = TRUE; - *needs_forward = FALSE; - return; - - } else { - /* Redirect all other updates via CPG */ - *needs_reply = TRUE; - *needs_forward = TRUE; - *process = FALSE; - crm_trace("%s op from %s needs to be forwarded to client %s", - op, pcmk__client_name(cib_client), - pcmk__s(host, "the primary instance")); - return; - } - } - - + // Process locally and notify local client *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; - if (stand_alone) { - crm_trace("Processing %s op from client %s (stand-alone)", + if (pcmk_is_set(operation->flags, cib__op_attr_local)) { + /* Always process locally if cib__op_attr_local is set. + * + * @COMPAT: Currently host is ignored. At a compatibility break, throw + * an error (from cib_process_request() or earlier) if host is not NULL or + * OUR_NODENAME. 
+ */ + crm_trace("Processing always-local %s op from client %s", op, pcmk__client_name(cib_client)); - } else if (host == NULL) { - crm_trace("Processing unaddressed %s op from client %s", - op, pcmk__client_name(cib_client)); + if (!pcmk__str_eq(host, OUR_NODENAME, + pcmk__str_casei|pcmk__str_null_matches)) { - } else if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) { - crm_trace("Processing locally addressed %s op from client %s", + crm_warn("Operation '%s' is always local but its target host is " + "set to '%s'", + op, host); + } + return; + } + + if (pcmk_is_set(operation->flags, cib__op_attr_modifies) + || !pcmk__str_eq(host, OUR_NODENAME, + pcmk__str_casei|pcmk__str_null_matches)) { + + // Forward modifying and non-local requests via cluster + *process = FALSE; + *needs_reply = FALSE; + *local_notify = FALSE; + *needs_forward = TRUE; + + crm_trace("%s op from %s needs to be forwarded to %s", + op, pcmk__client_name(cib_client), + pcmk__s(host, "all nodes")); + return; + } + + if (stand_alone) { + crm_trace("Processing %s op from client %s (stand-alone)", op, pcmk__client_name(cib_client)); } else { - crm_trace("%s op from %s needs to be forwarded to client %s", - op, pcmk__client_name(cib_client), host); - *needs_forward = TRUE; - *process = FALSE; + crm_trace("Processing %saddressed %s op from client %s", + ((host != NULL)? "locally " : "un"), + op, pcmk__client_name(cib_client)); } } static void -parse_local_options(const pcmk__client_t *cib_client, int call_type, - int call_options, const char *host, const char *op, - gboolean *local_notify, gboolean *needs_reply, - gboolean *process, gboolean *needs_forward) +parse_local_options(const pcmk__client_t *cib_client, + const cib__operation_t *operation, int call_options, + const char *host, const char *op, gboolean *local_notify, + gboolean *needs_reply, gboolean *process, + gboolean *needs_forward) { if(cib_legacy_mode()) { - parse_local_options_v1(cib_client, call_type, call_options, host, - op, local_notify, needs_reply, process, needs_forward); + parse_local_options_v1(cib_client, operation, call_options, host, + op, local_notify, needs_reply, process, + needs_forward); } else { - parse_local_options_v2(cib_client, call_type, call_options, host, - op, local_notify, needs_reply, process, needs_forward); + parse_local_options_v2(cib_client, operation, call_options, host, + op, local_notify, needs_reply, process, + needs_forward); } } static gboolean -parse_peer_options_v1(int call_type, xmlNode * request, - gboolean * local_notify, gboolean * needs_reply, gboolean * process, - gboolean * needs_forward) +parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request, + gboolean *local_notify, gboolean *needs_reply, + gboolean *process) { const char *op = NULL; const char *host = NULL; @@ -620,7 +676,8 @@ parse_peer_options_v1(int call_type, xmlNode * request, } op = crm_element_value(request, F_CIB_OPERATION); - crm_trace("Processing %s request sent by %s", op, originator); + crm_trace("Processing legacy %s request sent by %s", op, originator); + if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) { /* Always process these */ *local_notify = FALSE; @@ -693,9 +750,9 @@ parse_peer_options_v1(int call_type, xmlNode * request, } static gboolean -parse_peer_options_v2(int call_type, xmlNode * request, - gboolean * local_notify, gboolean * needs_reply, gboolean * process, - gboolean * needs_forward) +parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, + gboolean *local_notify, gboolean 
*needs_reply, + gboolean *process) { const char *host = NULL; const char *delegated = crm_element_value(request, F_CIB_DELEGATED); @@ -705,6 +762,10 @@ parse_peer_options_v2(int call_type, xmlNode * request, gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei); + if (originator == NULL) { // Shouldn't be possible + originator = "peer"; + } + if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { /* sync_our_cib() sets F_CIB_ISREPLY */ if (reply_to) { @@ -734,10 +795,10 @@ parse_peer_options_v2(int call_type, xmlNode * request, const char *max = crm_element_value(request, F_CIB_SCHEMA_MAX); const char *upgrade_rc = crm_element_value(request, F_CIB_UPGRADE_RC); - crm_trace("Parsing %s operation%s for %s with max=%s and upgrade_rc=%s", - op, (is_reply? " reply" : ""), + crm_trace("Parsing upgrade %s for %s with max=%s and upgrade_rc=%s", + (is_reply? "reply" : "request"), (based_is_primary? "primary" : "secondary"), - (max? max : "none"), (upgrade_rc? upgrade_rc : "none")); + pcmk__s(max, "none"), pcmk__s(upgrade_rc, "none")); if (upgrade_rc != NULL) { // Our upgrade request was rejected by DC, notify clients of result @@ -752,7 +813,7 @@ parse_peer_options_v2(int call_type, xmlNode * request, goto skip_is_reply; } else { - // Ignore broadcast client requests when we're not DC + // Ignore broadcast client requests when we're not primary return FALSE; } @@ -762,22 +823,25 @@ parse_peer_options_v2(int call_type, xmlNode * request, legacy_mode = TRUE; return FALSE; - } else if (is_reply && cib_op_modifies(call_type)) { + } else if (is_reply + && pcmk_is_set(operation->flags, cib__op_attr_modifies)) { crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator); return FALSE; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) { - /* Legacy handling */ - crm_debug("Legacy handling of %s message from %s", op, originator); *local_notify = FALSE; if (reply_to == NULL) { *process = TRUE; + } else { // Not possible? + crm_debug("Ignoring shutdown request from %s because reply_to=%s", + originator, reply_to); } return *process; } - if(is_reply) { - crm_trace("Handling %s reply sent from %s to local clients", op, originator); + if (is_reply) { + crm_trace("Will notify local clients for %s reply from %s", + op, originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; @@ -797,62 +861,78 @@ parse_peer_options_v2(int call_type, xmlNode * request, return TRUE; } else if (host != NULL) { - /* this is for a specific instance and we're not it */ - crm_trace("Ignoring %s operation for instance on %s", op, host); + crm_trace("Ignoring %s request intended for CIB manager on %s", + op, host); return FALSE; } else if(is_reply == FALSE && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) { *needs_reply = TRUE; } - crm_trace("Processing %s request sent to everyone by %s/%s on %s %s", op, - crm_element_value(request, F_CIB_CLIENTNAME), - crm_element_value(request, F_CIB_CALLID), - originator, (*local_notify)?"(notify)":""); + crm_trace("Processing %s request broadcast by %s call %s on %s " + "(local clients will%s be notified)", op, + pcmk__s(crm_element_value(request, F_CIB_CLIENTNAME), "client"), + pcmk__s(crm_element_value(request, F_CIB_CALLID), "without ID"), + originator, (*local_notify? 
"" : "not")); return TRUE; } static gboolean -parse_peer_options(int call_type, xmlNode * request, - gboolean * local_notify, gboolean * needs_reply, gboolean * process, - gboolean * needs_forward) +parse_peer_options(const cib__operation_t *operation, xmlNode *request, + gboolean *local_notify, gboolean *needs_reply, + gboolean *process) { /* TODO: What happens when an update comes in after node A * requests the CIB from node B, but before it gets the reply (and * sends out the replace operation) */ if(cib_legacy_mode()) { - return parse_peer_options_v1( - call_type, request, local_notify, needs_reply, process, needs_forward); + return parse_peer_options_v1(operation, request, local_notify, + needs_reply, process); } else { - return parse_peer_options_v2( - call_type, request, local_notify, needs_reply, process, needs_forward); + return parse_peer_options_v2(operation, request, local_notify, + needs_reply, process); } } +/*! + * \internal + * \brief Forward a CIB request to the appropriate target host(s) + * + * \param[in] request CIB request to forward + */ static void -forward_request(xmlNode *request, int call_options) +forward_request(xmlNode *request) { const char *op = crm_element_value(request, F_CIB_OPERATION); + const char *section = crm_element_value(request, F_CIB_SECTION); const char *host = crm_element_value(request, F_CIB_HOST); + const char *originator = crm_element_value(request, F_ORIG); + const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); + const char *call_id = crm_element_value(request, F_CIB_CALLID); - crm_xml_add(request, F_CIB_DELEGATED, OUR_NODENAME); - - if (host != NULL) { - crm_trace("Forwarding %s op to %s", op, host); - send_cluster_message(crm_get_peer(0, host), crm_msg_cib, request, FALSE); + int log_level = LOG_INFO; - } else { - crm_trace("Forwarding %s op to primary instance", op); - send_cluster_message(NULL, crm_msg_cib, request, FALSE); + if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) { + log_level = LOG_DEBUG; } - /* Return the request to its original state */ - xml_remove_prop(request, F_CIB_DELEGATED); + do_crm_log(log_level, + "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", + pcmk__s(op, "invalid"), + pcmk__s(section, "all"), + pcmk__s(host, (cib_legacy_mode()? "primary" : "all")), + pcmk__s(originator, "local"), + pcmk__s(client_name, "unspecified"), + pcmk__s(call_id, "unspecified")); - if (call_options & cib_discard_reply) { - crm_trace("Client not interested in reply"); - } + crm_xml_add(request, F_CIB_DELEGATED, OUR_NODENAME); + + send_cluster_message(((host != NULL)? crm_get_peer(0, host) : NULL), + crm_msg_cib, request, FALSE); + + // Return the request to its original state + xml_remove_prop(request, F_CIB_DELEGATED); } static gboolean @@ -861,9 +941,10 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb CRM_ASSERT(msg != NULL); if (broadcast) { - /* this (successful) call modified the CIB _and_ the - * change needs to be broadcast... - * send via HA to other nodes + /* @COMPAT: Legacy code + * + * This successful call modified the CIB, and the change needs to be + * broadcast (sent via cluster to all nodes). 
*/ int diff_add_updates = 0; int diff_add_epoch = 0; @@ -878,7 +959,7 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb CRM_LOG_ASSERT(result_diff != NULL); digest = crm_element_value(result_diff, XML_ATTR_DIGEST); - crm_element_value_int(result_diff, "format", &format); + crm_element_value_int(result_diff, PCMK_XA_FORMAT, &format); cib_diff_version_details(result_diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, @@ -919,12 +1000,14 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb * \param[in] privileged Whether privileged commands may be run * (see cib_server_ops[] definition) * \param[in] cib_client IPC client that sent request (or NULL if CPG) + * + * \return Legacy Pacemaker return code */ -static void +int cib_process_request(xmlNode *request, gboolean privileged, const pcmk__client_t *cib_client) { - int call_type = 0; + // @TODO: Break into multiple smaller functions int call_options = 0; gboolean process = TRUE; // Whether to process request locally now @@ -946,12 +1029,16 @@ cib_process_request(xmlNode *request, gboolean privileged, const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); + const cib__operation_t *operation = NULL; + cib__op_fn_t op_function = NULL; + crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); if ((host != NULL) && (*host == '\0')) { host = NULL; } + // @TODO: Improve trace messages. Target is accurate only for legacy mode. if (host) { target = host; @@ -970,72 +1057,68 @@ cib_process_request(xmlNode *request, gboolean privileged, crm_trace("Processing local %s operation from %s/%s intended for %s", op, client_name, call_id, target); } - rc = cib_get_operation_id(op, &call_type); + rc = cib__get_operation(op, &operation); + rc = pcmk_rc2legacy(rc); if (rc != pcmk_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc)); - return; + return rc; + } + + op_function = based_get_op_function(operation); + if (op_function == NULL) { + crm_err("Operation %s not supported by CIB manager", op); + return -EOPNOTSUPP; } if (cib_client != NULL) { - parse_local_options(cib_client, call_type, call_options, host, op, - &local_notify, &needs_reply, &process, &needs_forward); + parse_local_options(cib_client, operation, call_options, host, op, + &local_notify, &needs_reply, &process, + &needs_forward); - } else if (parse_peer_options(call_type, request, &local_notify, - &needs_reply, &process, &needs_forward) == FALSE) { - return; + } else if (!parse_peer_options(operation, request, &local_notify, + &needs_reply, &process)) { + return rc; + } + + if (pcmk_is_set(call_options, cib_transaction)) { + /* All requests in a transaction are processed locally against a working + * CIB copy, and we don't notify for individual requests because the + * entire transaction is atomic. + * + * We still call the option parser functions above, for the sake of log + * messages and checking whether we're the target for peer requests. 
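+ * Net effect of the flags set below: the request is applied only to the
+ * transaction's working CIB copy, and nothing is forwarded, replied to,
+ * or notified until the commit-transaction request itself is handled.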
+ */ + process = TRUE; + needs_reply = FALSE; + local_notify = FALSE; + needs_forward = FALSE; } - is_update = cib_op_modifies(call_type); + is_update = pcmk_is_set(operation->flags, cib__op_attr_modifies); - if (call_options & cib_discard_reply) { + if (pcmk_is_set(call_options, cib_discard_reply)) { /* If the request will modify the CIB, and we are in legacy mode, we * need to build a reply so we can broadcast a diff, even if the * requester doesn't want one. */ needs_reply = is_update && cib_legacy_mode(); local_notify = FALSE; + crm_trace("Client is not interested in the reply"); } if (needs_forward) { - const char *section = crm_element_value(request, F_CIB_SECTION); - int log_level = LOG_INFO; - - if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) { - log_level = LOG_DEBUG; - } - - do_crm_log(log_level, - "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", - op, - section ? section : "'all'", - pcmk__s(host, (cib_legacy_mode() ? "primary" : "all")), - originator ? originator : "local", - client_name, call_id); - - forward_request(request, call_options); - return; + forward_request(request); + return rc; } if (cib_status != pcmk_ok) { - const char *call = crm_element_value(request, F_CIB_CALLID); - rc = cib_status; crm_err("Operation ignored, cluster configuration is invalid." " Please repair and restart: %s", pcmk_strerror(cib_status)); - op_reply = create_xml_node(NULL, "cib-reply"); - crm_xml_add(op_reply, F_TYPE, T_CIB); - crm_xml_add(op_reply, F_CIB_OPERATION, op); - crm_xml_add(op_reply, F_CIB_CALLID, call); - crm_xml_add(op_reply, F_CIB_CLIENTID, client_id); - crm_xml_add_int(op_reply, F_CIB_CALLOPTS, call_options); - crm_xml_add_int(op_reply, F_CIB_RC, rc); - - crm_trace("Attaching reply output"); - add_message_xml(op_reply, F_CIB_CALLDATA, the_cib); - - crm_log_xml_explicit(op_reply, "cib:reply"); + op_reply = create_cib_reply(op, call_id, client_id, call_options, rc, + the_cib); } else if (process) { time_t finished = 0; @@ -1043,7 +1126,8 @@ cib_process_request(xmlNode *request, gboolean privileged, int level = LOG_INFO; const char *section = crm_element_value(request, F_CIB_SECTION); - rc = cib_process_command(request, &op_reply, &result_diff, privileged); + rc = cib_process_command(request, operation, op_function, &op_reply, + &result_diff, privileged); if (!is_update) { level = LOG_TRACE; @@ -1120,10 +1204,9 @@ cib_process_request(xmlNode *request, gboolean privileged, op_reply = NULL; /* the reply is queued, so don't free here */ } - } else if (call_options & cib_discard_reply) { - crm_trace("Caller isn't interested in reply"); + } else if ((cib_client == NULL) + && !pcmk_is_set(call_options, cib_discard_reply)) { - } else if (cib_client == NULL) { if (is_update == FALSE || result_diff == NULL) { crm_trace("Request not broadcast: R/O call"); @@ -1158,24 +1241,51 @@ cib_process_request(xmlNode *request, gboolean privileged, free_xml(op_reply); free_xml(result_diff); - return; + return rc; } -static char * -calculate_section_digest(const char *xpath, xmlNode * xml_obj) +/*! + * \internal + * \brief Get a CIB operation's input from the request XML + * + * \param[in] request CIB request XML + * \param[in] type CIB operation type + * \param[out] section Where to store CIB section name + * + * \return Input XML for CIB operation + * + * \note If not \c NULL, the return value is a non-const pointer to part of + * \p request. The caller should not free it directly. 
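+ * \note Informally: for \c cib__op_apply_patch the input is the patch/diff
+ *       payload, while for other operations it is the \c F_CIB_CALLDATA
+ *       payload, narrowed to the requested section when a whole CIB was
+ *       supplied (see the switch below).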
+ */ +static xmlNode * +prepare_input(const xmlNode *request, enum cib__op_type type, + const char **section) { - xmlNode *xml_section = NULL; + xmlNode *input = NULL; + + *section = NULL; + + switch (type) { + case cib__op_apply_patch: + if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { + input = get_message_xml(request, F_CIB_UPDATE_DIFF); + } else { + input = get_message_xml(request, F_CIB_CALLDATA); + } + break; - if (xml_obj == NULL) { - return NULL; + default: + input = get_message_xml(request, F_CIB_CALLDATA); + *section = crm_element_value(request, F_CIB_SECTION); + break; } - xml_section = get_xpath_object(xpath, xml_obj, LOG_TRACE); - if (xml_section == NULL) { - return NULL; + // Grab the specified section + if ((*section != NULL) && pcmk__xe_is(input, XML_TAG_CIB)) { + input = pcmk_find_cib_element(input, *section); } - return calculate_xml_versioned_digest(xml_section, FALSE, TRUE, CRM_FEATURE_SET); + return input; } // v1 and v2 patch formats @@ -1201,14 +1311,14 @@ contains_config_change(xmlNode *diff) } static int -cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gboolean privileged) +cib_process_command(xmlNode *request, const cib__operation_t *operation, + cib__op_fn_t op_function, xmlNode **reply, + xmlNode **cib_diff, bool privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; - xmlNode *current_cib = NULL; - int call_type = 0; int call_options = 0; const char *op = NULL; @@ -1216,24 +1326,15 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb const char *call_id = crm_element_value(request, F_CIB_CALLID); const char *client_id = crm_element_value(request, F_CIB_CLIENTID); const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); - const char *origin = crm_element_value(request, F_ORIG); + const char *originator = crm_element_value(request, F_ORIG); int rc = pcmk_ok; - int rc2 = pcmk_ok; - gboolean send_r_notify = FALSE; - gboolean config_changed = FALSE; - gboolean manage_counters = TRUE; + bool config_changed = false; + bool manage_counters = true; static mainloop_timer_t *digest_timer = NULL; - char *current_nodes_digest = NULL; - char *current_alerts_digest = NULL; - char *current_status_digest = NULL; - uint32_t change_section = cib_change_section_nodes - |cib_change_section_alerts - |cib_change_section_status; - CRM_ASSERT(cib_status == pcmk_ok); if(digest_timer == NULL) { @@ -1242,91 +1343,64 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb *reply = NULL; *cib_diff = NULL; - current_cib = the_cib; /* Start processing the request... 
*/ op = crm_element_value(request, F_CIB_OPERATION); crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); - rc = cib_get_operation_id(op, &call_type); - if (rc == pcmk_ok && privileged == FALSE) { - rc = cib_op_can_run(call_type, call_options, privileged); + if (!privileged && pcmk_is_set(operation->flags, cib__op_attr_privileged)) { + rc = -EACCES; + crm_trace("Failed due to lack of privileges: %s", pcmk_strerror(rc)); + goto done; } - rc2 = cib_op_prepare(call_type, request, &input, §ion); - if (rc == pcmk_ok) { - rc = rc2; - } + input = prepare_input(request, operation->type, §ion); - if (rc != pcmk_ok) { - crm_trace("Call setup failed: %s", pcmk_strerror(rc)); - goto done; - - } else if (cib_op_modifies(call_type) == FALSE) { - rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE, - section, request, input, FALSE, &config_changed, - current_cib, &result_cib, NULL, &output); + if (!pcmk_is_set(operation->flags, cib__op_attr_modifies)) { + rc = cib_perform_op(op, call_options, op_function, true, section, + request, input, false, &config_changed, &the_cib, + &result_cib, NULL, &output); CRM_CHECK(result_cib == NULL, free_xml(result_cib)); goto done; } - /* Handle a valid write action */ + /* @COMPAT: Handle a valid write action (legacy) + * + * @TODO: Re-evaluate whether this is all truly legacy. The cib_force_diff + * portion is. However, F_CIB_GLOBAL_UPDATE may be set by a sync operation + * even in non-legacy mode, and manage_counters tells xml_create_patchset() + * whether to update version/epoch info. + */ if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { - /* legacy code */ - manage_counters = FALSE; + manage_counters = false; cib__set_call_options(call_options, "call", cib_force_diff); crm_trace("Global update detected"); - CRM_CHECK(call_type == 3 || call_type == 4, crm_err("Call type: %d", call_type); - crm_log_xml_err(request, "bad op")); + CRM_LOG_ASSERT(pcmk__str_any_of(op, + PCMK__CIB_REQUEST_APPLY_PATCH, + PCMK__CIB_REQUEST_REPLACE, + NULL)); } ping_modified_since = TRUE; if (pcmk_is_set(call_options, cib_inhibit_bcast)) { crm_trace("Skipping update: inhibit broadcast"); - manage_counters = FALSE; - } - - if (!pcmk_is_set(call_options, cib_dryrun) - && pcmk__str_eq(section, XML_CIB_TAG_STATUS, pcmk__str_casei)) { - // Copying large CIBs accounts for a huge percentage of our CIB usage - cib__set_call_options(call_options, "call", cib_zero_copy); - } else { - cib__clear_call_options(call_options, "call", cib_zero_copy); - } - -#define XPATH_CONFIG "//" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION -#define XPATH_NODES XPATH_CONFIG "/" XML_CIB_TAG_NODES -#define XPATH_ALERTS XPATH_CONFIG "/" XML_CIB_TAG_ALERTS -#define XPATH_STATUS "//" XML_TAG_CIB "/" XML_CIB_TAG_STATUS - - // Calculate the hash value of the section before the change - if (pcmk__str_eq(PCMK__CIB_REQUEST_REPLACE, op, pcmk__str_none)) { - current_nodes_digest = calculate_section_digest(XPATH_NODES, - current_cib); - current_alerts_digest = calculate_section_digest(XPATH_ALERTS, - current_cib); - current_status_digest = calculate_section_digest(XPATH_STATUS, - current_cib); - crm_trace("current-digest %s:%s:%s", current_nodes_digest, - current_alerts_digest, current_status_digest); + manage_counters = false; } // result_cib must not be modified after cib_perform_op() returns - rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE, - section, request, input, manage_counters, - &config_changed, current_cib, &result_cib, cib_diff, - &output); + rc = cib_perform_op(op, 
call_options, op_function, false, section, + request, input, manage_counters, &config_changed, + &the_cib, &result_cib, cib_diff, &output); + // @COMPAT: Legacy code if (!manage_counters) { int format = 1; - /* Legacy code - * If the diff is NULL at this point, it's because nothing changed - */ + // If the diff is NULL at this point, it's because nothing changed if (*cib_diff != NULL) { - crm_element_value_int(*cib_diff, "format", &format); + crm_element_value_int(*cib_diff, PCMK_XA_FORMAT, &format); } if (format == 1) { @@ -1334,92 +1408,60 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb } } - /* Always write to disk for successful replace and upgrade ops. This also + /* Always write to disk for successful ops with the flag set. This also * negates the need to detect ordering changes. */ if ((rc == pcmk_ok) - && pcmk__str_any_of(op, - PCMK__CIB_REQUEST_REPLACE, - PCMK__CIB_REQUEST_UPGRADE, - NULL)) { - config_changed = TRUE; - } - - if (rc == pcmk_ok && !pcmk_is_set(call_options, cib_dryrun)) { - crm_trace("Activating %s->%s%s%s", - crm_element_value(current_cib, XML_ATTR_NUMUPDATES), - crm_element_value(result_cib, XML_ATTR_NUMUPDATES), - (pcmk_is_set(call_options, cib_zero_copy)? " zero-copy" : ""), - (config_changed? " changed" : "")); - if (!pcmk_is_set(call_options, cib_zero_copy)) { - rc = activateCibXml(result_cib, config_changed, op); - crm_trace("Activated %s (%d)", - crm_element_value(current_cib, XML_ATTR_NUMUPDATES), rc); - } + && pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { - if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) { - cib_read_config(config_hash, result_cib); - } + config_changed = true; + } - if (pcmk__str_eq(PCMK__CIB_REQUEST_REPLACE, op, pcmk__str_none)) { - char *result_nodes_digest = NULL; - char *result_alerts_digest = NULL; - char *result_status_digest = NULL; - - /* Calculate the hash value of the changed section. */ - result_nodes_digest = calculate_section_digest(XPATH_NODES, - result_cib); - result_alerts_digest = calculate_section_digest(XPATH_ALERTS, - result_cib); - result_status_digest = calculate_section_digest(XPATH_STATUS, - result_cib); - crm_trace("result-digest %s:%s:%s", result_nodes_digest, - result_alerts_digest, result_status_digest); - - if (pcmk__str_eq(current_nodes_digest, result_nodes_digest, - pcmk__str_none)) { - change_section = - pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, - "CIB change section", - "change_section", change_section, - cib_change_section_nodes, "nodes"); - } + if ((rc == pcmk_ok) + && !pcmk_any_flags_set(call_options, cib_dryrun|cib_transaction)) { - if (pcmk__str_eq(current_alerts_digest, result_alerts_digest, - pcmk__str_none)) { - change_section = - pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, - "CIB change section", - "change_section", change_section, - cib_change_section_alerts, "alerts"); + if (result_cib != the_cib) { + if (pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { + config_changed = true; } - if (pcmk__str_eq(current_status_digest, result_status_digest, - pcmk__str_none)) { - change_section = - pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, - "CIB change section", - "change_section", change_section, - cib_change_section_status, "status"); - } + crm_trace("Activating %s->%s%s", + crm_element_value(the_cib, XML_ATTR_NUMUPDATES), + crm_element_value(result_cib, XML_ATTR_NUMUPDATES), + (config_changed? 
" changed" : "")); - if (change_section != cib_change_section_none) { - send_r_notify = TRUE; + rc = activateCibXml(result_cib, config_changed, op); + if (rc != pcmk_ok) { + crm_err("Failed to activate new CIB: %s", pcmk_strerror(rc)); } - - free(result_nodes_digest); - free(result_alerts_digest); - free(result_status_digest); + } + + if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) { + cib_read_config(config_hash, result_cib); + } - } else if (pcmk__str_eq(PCMK__CIB_REQUEST_ERASE, op, pcmk__str_none)) { - send_r_notify = TRUE; + /* @COMPAT Nodes older than feature set 3.19.0 don't support + * transactions. In a mixed-version cluster with nodes <3.19.0, we must + * sync the updated CIB, so that the older nodes receive the changes. + * Any node that has already applied the transaction will ignore the + * synced CIB. + * + * To ensure the updated CIB is synced from only one node, we sync it + * from the originator. + */ + if ((operation->type == cib__op_commit_transact) + && pcmk__str_eq(originator, OUR_NODENAME, pcmk__str_casei) + && compare_version(crm_element_value(the_cib, XML_ATTR_CRM_VERSION), + "3.19.0") < 0) { + + sync_our_cib(request, TRUE); } mainloop_timer_stop(digest_timer); mainloop_timer_start(digest_timer); } else if (rc == -pcmk_err_schema_validation) { - CRM_ASSERT(!pcmk_is_set(call_options, cib_zero_copy)); + CRM_ASSERT(result_cib != the_cib); if (output != NULL) { crm_log_xml_info(output, "cib:output"); @@ -1432,61 +1474,31 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb crm_trace("Not activating %d %d %s", rc, pcmk_is_set(call_options, cib_dryrun), crm_element_value(result_cib, XML_ATTR_NUMUPDATES)); - if (!pcmk_is_set(call_options, cib_zero_copy)) { + + if (result_cib != the_cib) { free_xml(result_cib); } } - if ((call_options & (cib_inhibit_notify|cib_dryrun)) == 0) { + if (!pcmk_any_flags_set(call_options, + cib_dryrun|cib_inhibit_notify|cib_transaction)) { crm_trace("Sending notifications %d", pcmk_is_set(call_options, cib_dryrun)); - cib_diff_notify(op, rc, call_id, client_id, client_name, origin, input, - *cib_diff); + cib_diff_notify(op, rc, call_id, client_id, client_name, originator, + input, *cib_diff); } - if (send_r_notify) { - cib_replace_notify(op, rc, call_id, client_id, client_name, origin, - the_cib, *cib_diff, change_section); - } - - pcmk__output_set_log_level(logger_out, LOG_TRACE); - logger_out->message(logger_out, "xml-patchset", *cib_diff); + pcmk__log_xml_patchset(LOG_TRACE, *cib_diff); done: if (!pcmk_is_set(call_options, cib_discard_reply) || cib_legacy_mode()) { - const char *caller = crm_element_value(request, F_CIB_CLIENTID); - - *reply = create_xml_node(NULL, "cib-reply"); - crm_xml_add(*reply, F_TYPE, T_CIB); - crm_xml_add(*reply, F_CIB_OPERATION, op); - crm_xml_add(*reply, F_CIB_CALLID, call_id); - crm_xml_add(*reply, F_CIB_CLIENTID, caller); - crm_xml_add_int(*reply, F_CIB_CALLOPTS, call_options); - crm_xml_add_int(*reply, F_CIB_RC, rc); - - if (output != NULL) { - crm_trace("Attaching reply output"); - add_message_xml(*reply, F_CIB_CALLDATA, output); - } - - crm_log_xml_explicit(*reply, "cib:reply"); + *reply = create_cib_reply(op, call_id, client_id, call_options, rc, + output); } - crm_trace("cleanup"); - - if (cib_op_modifies(call_type) == FALSE && output != current_cib) { + if (output != the_cib) { free_xml(output); - output = NULL; - } - - if (call_type >= 0) { - cib_op_cleanup(call_type, call_options, &input, &output); } - - free(current_nodes_digest); - free(current_alerts_digest); - 
free(current_status_digest); - crm_trace("done"); return rc; } @@ -1554,12 +1566,12 @@ initiate_exit(void) xmlNode *leaving = NULL; active = crm_active_peers(); - if (active < 2) { + if (active < 2) { // This is the last active node terminate_cib(__func__, 0); return; } - crm_info("Sending disconnect notification to %d peers...", active); + crm_info("Sending shutdown request to %d peers", active); leaving = create_xml_node(NULL, "exit-notification"); crm_xml_add(leaving, F_TYPE, "cib"); @@ -1664,12 +1676,6 @@ terminate_cib(const char *caller, int fast) uninitializeCib(); - if (logger_out != NULL) { - logger_out->finish(logger_out, CRM_EX_OK, true, NULL); - pcmk__output_free(logger_out); - logger_out = NULL; - } - if (fast > 0) { /* Quit fast on error */ pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); diff --git a/daemons/based/based_common.c b/daemons/based/based_common.c deleted file mode 100644 index 7e68cf0..0000000 --- a/daemons/based/based_common.c +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright 2008-2023 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU General Public License version 2 - * or later (GPLv2+) WITHOUT ANY WARRANTY. - */ - -#include - -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - -#include - -gboolean stand_alone = FALSE; - -extern int cib_perform_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, - gboolean privileged); - -static xmlNode * -cib_prepare_common(xmlNode * root, const char *section) -{ - xmlNode *data = NULL; - - /* extract the CIB from the fragment */ - if (root == NULL) { - return NULL; - - } else if (pcmk__strcase_any_of(crm_element_name(root), XML_TAG_FRAGMENT, - F_CRM_DATA, F_CIB_CALLDATA, NULL)) { - data = first_named_child(root, XML_TAG_CIB); - - } else { - data = root; - } - - /* grab the section specified for the command */ - if (section != NULL && data != NULL && pcmk__str_eq(crm_element_name(data), XML_TAG_CIB, pcmk__str_none)) { - data = pcmk_find_cib_element(data, section); - } - - /* crm_log_xml_trace(root, "cib:input"); */ - return data; -} - -static int -cib_prepare_none(xmlNode * request, xmlNode ** data, const char **section) -{ - *data = NULL; - *section = crm_element_value(request, F_CIB_SECTION); - return pcmk_ok; -} - -static int -cib_prepare_data(xmlNode * request, xmlNode ** data, const char **section) -{ - xmlNode *input_fragment = get_message_xml(request, F_CIB_CALLDATA); - - *section = crm_element_value(request, F_CIB_SECTION); - *data = cib_prepare_common(input_fragment, *section); - /* crm_log_xml_debug(*data, "data"); */ - return pcmk_ok; -} - -static int -cib_prepare_sync(xmlNode * request, xmlNode ** data, const char **section) -{ - *data = NULL; - *section = crm_element_value(request, F_CIB_SECTION); - return pcmk_ok; -} - -static int -cib_prepare_diff(xmlNode * request, xmlNode ** data, const char **section) -{ - xmlNode *input_fragment = NULL; - - *data = NULL; - *section = NULL; - - if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { - input_fragment = get_message_xml(request, F_CIB_UPDATE_DIFF); - } else { - input_fragment = get_message_xml(request, F_CIB_CALLDATA); - } - - CRM_CHECK(input_fragment != NULL, crm_log_xml_warn(request, "no input")); - *data = cib_prepare_common(input_fragment, NULL); - return pcmk_ok; -} - -static int -cib_cleanup_query(int options, xmlNode ** data, xmlNode ** 
output) -{ - CRM_LOG_ASSERT(*data == NULL); - if ((options & cib_no_children) - || pcmk__str_eq(crm_element_name(*output), "xpath-query", pcmk__str_casei)) { - free_xml(*output); - } - return pcmk_ok; -} - -static int -cib_cleanup_data(int options, xmlNode ** data, xmlNode ** output) -{ - free_xml(*output); - *data = NULL; - return pcmk_ok; -} - -static int -cib_cleanup_output(int options, xmlNode ** data, xmlNode ** output) -{ - free_xml(*output); - return pcmk_ok; -} - -static int -cib_cleanup_none(int options, xmlNode ** data, xmlNode ** output) -{ - CRM_LOG_ASSERT(*data == NULL); - CRM_LOG_ASSERT(*output == NULL); - return pcmk_ok; -} - -static cib_operation_t cib_server_ops[] = { - // Booleans are modifies_cib, needs_privileges - { - NULL, FALSE, FALSE, - cib_prepare_none, cib_cleanup_none, cib_process_default - }, - { - PCMK__CIB_REQUEST_QUERY, FALSE, FALSE, - cib_prepare_none, cib_cleanup_query, cib_process_query - }, - { - PCMK__CIB_REQUEST_MODIFY, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_modify - }, - { - PCMK__CIB_REQUEST_APPLY_PATCH, TRUE, TRUE, - cib_prepare_diff, cib_cleanup_data, cib_server_process_diff - }, - { - PCMK__CIB_REQUEST_REPLACE, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_replace_svr - }, - { - PCMK__CIB_REQUEST_CREATE, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_create - }, - { - PCMK__CIB_REQUEST_DELETE, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_delete - }, - { - PCMK__CIB_REQUEST_SYNC_TO_ALL, FALSE, TRUE, - cib_prepare_sync, cib_cleanup_none, cib_process_sync - }, - { - PCMK__CIB_REQUEST_BUMP, TRUE, TRUE, - cib_prepare_none, cib_cleanup_output, cib_process_bump - }, - { - PCMK__CIB_REQUEST_ERASE, TRUE, TRUE, - cib_prepare_none, cib_cleanup_output, cib_process_erase - }, - { - PCMK__CIB_REQUEST_NOOP, FALSE, FALSE, - cib_prepare_none, cib_cleanup_none, cib_process_default - }, - { - PCMK__CIB_REQUEST_ABS_DELETE, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_delete_absolute - }, - { - PCMK__CIB_REQUEST_UPGRADE, TRUE, TRUE, - cib_prepare_none, cib_cleanup_output, cib_process_upgrade_server - }, - { - PCMK__CIB_REQUEST_SECONDARY, FALSE, TRUE, - cib_prepare_none, cib_cleanup_none, cib_process_readwrite - }, - { - PCMK__CIB_REQUEST_ALL_SECONDARY, FALSE, TRUE, - cib_prepare_none, cib_cleanup_none, cib_process_readwrite - }, - { - PCMK__CIB_REQUEST_SYNC_TO_ONE, FALSE, TRUE, - cib_prepare_sync, cib_cleanup_none, cib_process_sync_one - }, - { - PCMK__CIB_REQUEST_PRIMARY, TRUE, TRUE, - cib_prepare_data, cib_cleanup_data, cib_process_readwrite - }, - { - PCMK__CIB_REQUEST_IS_PRIMARY, FALSE, TRUE, - cib_prepare_none, cib_cleanup_none, cib_process_readwrite - }, - { - PCMK__CIB_REQUEST_SHUTDOWN, FALSE, TRUE, - cib_prepare_sync, cib_cleanup_none, cib_process_shutdown_req - }, - { - CRM_OP_PING, FALSE, FALSE, - cib_prepare_none, cib_cleanup_output, cib_process_ping - }, -}; - -int -cib_get_operation_id(const char *op, int *operation) -{ - static GHashTable *operation_hash = NULL; - - if (operation_hash == NULL) { - int lpc = 0; - int max_msg_types = PCMK__NELEM(cib_server_ops); - - operation_hash = pcmk__strkey_table(NULL, free); - for (lpc = 1; lpc < max_msg_types; lpc++) { - int *value = malloc(sizeof(int)); - - if(value) { - *value = lpc; - g_hash_table_insert(operation_hash, (gpointer) cib_server_ops[lpc].operation, value); - } - } - } - - if (op != NULL) { - int *value = g_hash_table_lookup(operation_hash, op); - - if (value) { - *operation = *value; - return pcmk_ok; - } - } - 
crm_err("Operation %s is not valid", op); - *operation = -1; - return -EINVAL; -} - -xmlNode * -cib_msg_copy(xmlNode * msg, gboolean with_data) -{ - int lpc = 0; - const char *field = NULL; - const char *value = NULL; - xmlNode *value_struct = NULL; - - static const char *field_list[] = { - F_XML_TAGNAME, - F_TYPE, - F_CIB_CLIENTID, - F_CIB_CALLOPTS, - F_CIB_CALLID, - F_CIB_OPERATION, - F_CIB_ISREPLY, - F_CIB_SECTION, - F_CIB_HOST, - F_CIB_RC, - F_CIB_DELEGATED, - F_CIB_OBJID, - F_CIB_OBJTYPE, - F_CIB_EXISTING, - F_CIB_SEENCOUNT, - F_CIB_TIMEOUT, - F_CIB_GLOBAL_UPDATE, - F_CIB_CLIENTNAME, - F_CIB_USER, - F_CIB_NOTIFY_TYPE, - F_CIB_NOTIFY_ACTIVATE - }; - - static const char *data_list[] = { - F_CIB_CALLDATA, - F_CIB_UPDATE, - F_CIB_UPDATE_RESULT - }; - - xmlNode *copy = create_xml_node(NULL, "copy"); - - CRM_ASSERT(copy != NULL); - - for (lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) { - field = field_list[lpc]; - value = crm_element_value(msg, field); - if (value != NULL) { - crm_xml_add(copy, field, value); - } - } - for (lpc = 0; with_data && lpc < PCMK__NELEM(data_list); lpc++) { - field = data_list[lpc]; - value_struct = get_message_xml(msg, field); - if (value_struct != NULL) { - add_message_xml(copy, field, value_struct); - } - } - - return copy; -} - -cib_op_t * -cib_op_func(int call_type) -{ - return &(cib_server_ops[call_type].fn); -} - -gboolean -cib_op_modifies(int call_type) -{ - return cib_server_ops[call_type].modifies_cib; -} - -int -cib_op_can_run(int call_type, int call_options, bool privileged) -{ - if (!privileged && cib_server_ops[call_type].needs_privileges) { - return -EACCES; - } - return pcmk_ok; -} - -int -cib_op_prepare(int call_type, xmlNode * request, xmlNode ** input, const char **section) -{ - crm_trace("Prepare %d", call_type); - return cib_server_ops[call_type].prepare(request, input, section); -} - -int -cib_op_cleanup(int call_type, int options, xmlNode ** input, xmlNode ** output) -{ - crm_trace("Cleanup %d", call_type); - return cib_server_ops[call_type].cleanup(options, input, output); -} diff --git a/daemons/based/based_io.c b/daemons/based/based_io.c index fc34f39..f252ac1 100644 --- a/daemons/based/based_io.c +++ b/daemons/based/based_io.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -22,6 +22,9 @@ #include #include +#include +#include + #include #include @@ -45,12 +48,15 @@ cib_rename(const char *old) umask(S_IWGRP | S_IWOTH | S_IROTH); new_fd = mkstemp(new); - crm_err("Archiving unusable file %s as %s", old, new); + if ((new_fd < 0) || (rename(old, new) < 0)) { - crm_perror(LOG_ERR, "Couldn't rename %s as %s", old, new); - crm_err("Disabling disk writes and continuing"); + crm_err("Couldn't archive unusable file %s (disabling disk writes and continuing)", + old); cib_writes_enabled = FALSE; + } else { + crm_err("Archived unusable file %s as %s", old, new); } + if (new_fd > 0) { close(new_fd); } @@ -107,7 +113,7 @@ static int cib_archive_filter(const struct dirent * a) if(stat(a_path, &s) != 0) { rc = errno; - crm_trace("%s - stat failed: %s (%d)", a->d_name, pcmk_strerror(rc), rc); + crm_trace("%s - stat failed: %s (%d)", a->d_name, pcmk_rc_str(rc), rc); rc = 0; } else if ((s.st_mode & S_IFREG) != S_IFREG) { @@ -189,7 +195,7 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status) const char *name = NULL; const char *value = NULL; const char *validation = NULL; - const char *use_valgrind = getenv("PCMK_valgrind_enabled"); + const char *use_valgrind = pcmk__env_option(PCMK__ENV_VALGRIND_ENABLED); xmlNode *root = NULL; xmlNode *status = NULL; @@ -214,7 +220,7 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status) crm_warn("Primary configuration corrupt or unusable, trying backups in %s", cib_root); lpc = scandir(cib_root, &namelist, cib_archive_filter, cib_archive_sort); if (lpc < 0) { - crm_perror(LOG_NOTICE, "scandir(%s) failed", cib_root); + crm_err("scandir(%s) failed: %s", cib_root, pcmk_rc_str(errno)); } } @@ -418,7 +424,7 @@ write_cib_contents(gpointer p) pid = fork(); if (pid < 0) { - crm_perror(LOG_ERR, "Disabling disk writes after fork failure"); + crm_err("Disabling disk writes after fork failure: %s", pcmk_rc_str(errno)); cib_writes_enabled = FALSE; return FALSE; } diff --git a/daemons/based/based_messages.c b/daemons/based/based_messages.c index d46456c..35d639a 100644 --- a/daemons/based/based_messages.c +++ b/daemons/based/based_messages.c @@ -19,6 +19,9 @@ #include #include +#include +#include + #include #include #include @@ -61,25 +64,15 @@ cib_process_shutdown_req(const char *op, int options, const char *section, xmlNo return pcmk_ok; } +// @COMPAT: Remove when PCMK__CIB_REQUEST_NOOP is removed int -cib_process_default(const char *op, int options, const char *section, xmlNode * req, - xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, - xmlNode ** answer) +cib_process_noop(const char *op, int options, const char *section, xmlNode *req, + xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, + xmlNode **answer) { - int result = pcmk_ok; - crm_trace("Processing \"%s\" event", op); *answer = NULL; - - if (op == NULL) { - result = -EINVAL; - crm_err("No operation specified"); - - } else if (strcmp(PCMK__CIB_REQUEST_NOOP, op) != 0) { - result = -EPROTONOSUPPORT; - crm_err("Action [%s] is not supported by the CIB manager", op); - } - return result; + return pcmk_ok; } int @@ -158,10 +151,10 @@ cib_process_ping(const char *op, int options, const char *section, xmlNode * req // Append additional detail so the receiver can log the differences add_message_xml(*answer, F_CIB_CALLDATA, the_cib); }, - { + if (the_cib != NULL) { // Always include at least the version details - const char *tag = TYPE(the_cib); - xmlNode *shallow = create_xml_node(NULL, tag); + xmlNode *shallow = 
create_xml_node(NULL, + (const char *) the_cib->name); copy_in_properties(shallow, the_cib); add_message_xml(*answer, F_CIB_CALLDATA, shallow); @@ -250,7 +243,7 @@ cib_process_upgrade_server(const char *op, int options, const char *section, xml if (rc != pcmk_ok) { // Notify originating peer so it can notify its local clients - crm_node_t *origin = pcmk__search_cluster_node_cache(0, host); + crm_node_t *origin = pcmk__search_cluster_node_cache(0, host, NULL); crm_info("Rejecting upgrade request from %s: %s " CRM_XS " rc=%d peer=%s", host, pcmk_strerror(rc), rc, @@ -341,8 +334,7 @@ cib_server_process_diff(const char *op, int options, const char *section, xmlNod crm_warn("Requesting full CIB refresh because update failed: %s" CRM_XS " rc=%d", pcmk_strerror(rc), rc); - pcmk__output_set_log_level(logger_out, LOG_INFO); - logger_out->message(logger_out, "xml-patchset", input); + pcmk__log_xml_patchset(LOG_INFO, input); free_xml(*result_cib); *result_cib = NULL; send_sync_request(NULL); @@ -356,15 +348,16 @@ cib_process_replace_svr(const char *op, int options, const char *section, xmlNod xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { - const char *tag = crm_element_name(input); int rc = cib_process_replace(op, options, section, req, input, existing_cib, result_cib, answer); - if (rc == pcmk_ok && pcmk__str_eq(tag, XML_TAG_CIB, pcmk__str_casei)) { + + if ((rc == pcmk_ok) && pcmk__xe_is(input, XML_TAG_CIB)) { sync_in_progress = 0; } return rc; } +// @COMPAT: Remove when PCMK__CIB_REQUEST_ABS_DELETE is removed int cib_process_delete_absolute(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, @@ -373,6 +366,49 @@ cib_process_delete_absolute(const char *op, int options, const char *section, xm return -EINVAL; } +static xmlNode * +cib_msg_copy(xmlNode *msg) +{ + static const char *field_list[] = { + F_XML_TAGNAME, + F_TYPE, + F_CIB_CLIENTID, + F_CIB_CALLOPTS, + F_CIB_CALLID, + F_CIB_OPERATION, + F_CIB_ISREPLY, + F_CIB_SECTION, + F_CIB_HOST, + F_CIB_RC, + F_CIB_DELEGATED, + F_CIB_OBJID, + F_CIB_OBJTYPE, + F_CIB_EXISTING, + F_CIB_SEENCOUNT, + F_CIB_TIMEOUT, + F_CIB_GLOBAL_UPDATE, + F_CIB_CLIENTNAME, + F_CIB_USER, + F_CIB_NOTIFY_TYPE, + F_CIB_NOTIFY_ACTIVATE + }; + + xmlNode *copy = create_xml_node(NULL, "copy"); + + CRM_ASSERT(copy != NULL); + + for (int lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) { + const char *field = field_list[lpc]; + const char *value = crm_element_value(msg, field); + + if (value != NULL) { + crm_xml_add(copy, field, value); + } + } + + return copy; +} + int sync_our_cib(xmlNode * request, gboolean all) { @@ -384,22 +420,12 @@ sync_our_cib(xmlNode * request, gboolean all) xmlNode *replace_request = NULL; CRM_CHECK(the_cib != NULL, return -EINVAL); - - replace_request = cib_msg_copy(request, FALSE); - CRM_CHECK(replace_request != NULL, return -EINVAL); + CRM_CHECK(all || (host != NULL), return -EINVAL); crm_debug("Syncing CIB to %s", all ? 
"all peers" : host); - if (all == FALSE && host == NULL) { - crm_log_xml_err(request, "bad sync"); - } - /* remove the "all == FALSE" condition - * - * sync_from was failing, the local client wasn't being notified - * because it didn't know it was a reply - * setting this does not prevent the other nodes from applying it - * if all == TRUE - */ + replace_request = cib_msg_copy(request); + if (host != NULL) { crm_xml_add(replace_request, F_CIB_ISREPLY, host); } @@ -425,3 +451,30 @@ sync_our_cib(xmlNode * request, gboolean all) free(digest); return result; } + +int +cib_process_commit_transaction(const char *op, int options, const char *section, + xmlNode *req, xmlNode *input, + xmlNode *existing_cib, xmlNode **result_cib, + xmlNode **answer) +{ + /* On success, our caller will activate *result_cib locally, trigger a + * replace notification if appropriate, and sync *result_cib to all nodes. + * On failure, our caller will free *result_cib. + */ + int rc = pcmk_rc_ok; + const char *client_id = crm_element_value(req, F_CIB_CLIENTID); + const char *origin = crm_element_value(req, F_ORIG); + pcmk__client_t *client = pcmk__find_client_by_id(client_id); + + rc = based_commit_transaction(input, client, origin, result_cib); + + if (rc != pcmk_rc_ok) { + char *source = based_transaction_source_str(client, origin); + + crm_err("Could not commit transaction for %s: %s", + source, pcmk_rc_str(rc)); + free(source); + } + return pcmk_rc2legacy(rc); +} diff --git a/daemons/based/based_notify.c b/daemons/based/based_notify.c index 5881f6d..00a4c54 100644 --- a/daemons/based/based_notify.c +++ b/daemons/based/based_notify.c @@ -21,6 +21,9 @@ #include +#include +#include + #include #include #include @@ -30,7 +33,7 @@ #include struct cib_notification_s { - xmlNode *msg; + const xmlNode *msg; struct iovec *iov; int32_t iov_size; }; @@ -58,10 +61,6 @@ cib_notify_send_one(gpointer key, gpointer value, gpointer user_data) do_send = TRUE; - } else if (pcmk_is_set(client->flags, cib_notify_replace) - && pcmk__str_eq(type, T_CIB_REPLACE_NOTIFY, pcmk__str_casei)) { - do_send = TRUE; - } else if (pcmk_is_set(client->flags, cib_notify_confirm) && pcmk__str_eq(type, T_CIB_UPDATE_CONFIRM, pcmk__str_casei)) { do_send = TRUE; @@ -104,7 +103,7 @@ cib_notify_send_one(gpointer key, gpointer value, gpointer user_data) } static void -cib_notify_send(xmlNode * xml) +cib_notify_send(const xmlNode *xml) { struct iovec *iov; struct cib_notification_s update; @@ -198,15 +197,16 @@ cib_diff_notify(const char *op, int result, const char *call_id, crm_xml_add(update_msg, F_SUBTYPE, T_CIB_DIFF_NOTIFY); crm_xml_add(update_msg, F_CIB_OPERATION, op); crm_xml_add(update_msg, F_CIB_CLIENTID, client_id); + crm_xml_add(update_msg, F_CIB_CLIENTNAME, client_name); crm_xml_add(update_msg, F_CIB_CALLID, call_id); crm_xml_add(update_msg, F_ORIG, origin); crm_xml_add_int(update_msg, F_CIB_RC, result); if (update != NULL) { - type = crm_element_name(update); + type = (const char *) update->name; crm_trace("Setting type to update->name: %s", type); } else { - type = crm_element_name(diff); + type = (const char *) diff->name; crm_trace("Setting type to new_obj->name: %s", type); } crm_xml_add(update_msg, F_CIB_OBJID, ID(diff)); @@ -218,88 +218,7 @@ cib_diff_notify(const char *op, int result, const char *call_id, } add_message_xml(update_msg, F_CIB_UPDATE_RESULT, diff); + crm_log_xml_trace(update_msg, "diff-notify"); cib_notify_send(update_msg); free_xml(update_msg); } - -void -cib_replace_notify(const char *op, int result, const char *call_id, - const 
char *client_id, const char *client_name, - const char *origin, xmlNode *update, xmlNode *diff, - uint32_t change_section) -{ - xmlNode *replace_msg = NULL; - - int add_updates = 0; - int add_epoch = 0; - int add_admin_epoch = 0; - - int del_updates = 0; - int del_epoch = 0; - int del_admin_epoch = 0; - - uint8_t log_level = LOG_INFO; - - if (diff == NULL) { - return; - } - - if (result != pcmk_ok) { - log_level = LOG_WARNING; - } - - cib_diff_version_details(diff, &add_admin_epoch, &add_epoch, &add_updates, - &del_admin_epoch, &del_epoch, &del_updates); - - if (del_updates < 0) { - crm_log_xml_debug(diff, "Bad replace diff"); - } - - if ((add_admin_epoch != del_admin_epoch) - || (add_epoch != del_epoch) - || (add_updates != del_updates)) { - - do_crm_log(log_level, - "Replaced CIB generation %d.%d.%d with %d.%d.%d from client " - "%s%s%s (%s) (%s)", - del_admin_epoch, del_epoch, del_updates, - add_admin_epoch, add_epoch, add_updates, - client_name, - ((call_id != NULL)? " call " : ""), pcmk__s(call_id, ""), - pcmk__s(origin, "unspecified peer"), pcmk_strerror(result)); - - } else if ((add_admin_epoch != 0) - || (add_epoch != 0) - || (add_updates != 0)) { - - do_crm_log(log_level, - "Local-only replace of CIB generation %d.%d.%d from client " - "%s%s%s (%s) (%s)", - add_admin_epoch, add_epoch, add_updates, - client_name, - ((call_id != NULL)? " call " : ""), pcmk__s(call_id, ""), - pcmk__s(origin, "unspecified peer"), pcmk_strerror(result)); - } - - replace_msg = create_xml_node(NULL, "notify-replace"); - - crm_xml_add(replace_msg, F_TYPE, T_CIB_NOTIFY); - crm_xml_add(replace_msg, F_SUBTYPE, T_CIB_REPLACE_NOTIFY); - crm_xml_add(replace_msg, F_CIB_OPERATION, op); - crm_xml_add(replace_msg, F_CIB_CLIENTID, client_id); - crm_xml_add(replace_msg, F_CIB_CALLID, call_id); - crm_xml_add(replace_msg, F_ORIG, origin); - crm_xml_add_int(replace_msg, F_CIB_RC, result); - crm_xml_add_ll(replace_msg, F_CIB_CHANGE_SECTION, - (long long) change_section); - attach_cib_generation(replace_msg, "cib-replace-generation", update); - - /* We can include update and diff if a replace callback needs them. Until - * then, avoid the overhead. - */ - - crm_log_xml_trace(replace_msg, "CIB replaced"); - - cib_notify_send(replace_msg); - free_xml(replace_msg); -} diff --git a/daemons/based/based_operation.c b/daemons/based/based_operation.c new file mode 100644 index 0000000..736d425 --- /dev/null +++ b/daemons/based/based_operation.c @@ -0,0 +1,59 @@ +/* + * Copyright 2008-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. 
+ */ + +#include + +#include + +#include +#include +#include + +static const cib__op_fn_t cib_op_functions[] = { + [cib__op_abs_delete] = cib_process_delete_absolute, + [cib__op_apply_patch] = cib_server_process_diff, + [cib__op_bump] = cib_process_bump, + [cib__op_commit_transact] = cib_process_commit_transaction, + [cib__op_create] = cib_process_create, + [cib__op_delete] = cib_process_delete, + [cib__op_erase] = cib_process_erase, + [cib__op_is_primary] = cib_process_readwrite, + [cib__op_modify] = cib_process_modify, + [cib__op_noop] = cib_process_noop, + [cib__op_ping] = cib_process_ping, + [cib__op_primary] = cib_process_readwrite, + [cib__op_query] = cib_process_query, + [cib__op_replace] = cib_process_replace_svr, + [cib__op_secondary] = cib_process_readwrite, + [cib__op_shutdown] = cib_process_shutdown_req, + [cib__op_sync_all] = cib_process_sync, + [cib__op_sync_one] = cib_process_sync_one, + [cib__op_upgrade] = cib_process_upgrade_server, +}; + +/*! + * \internal + * \brief Get the function that performs a given server-side CIB operation + * + * \param[in] operation Operation whose function to look up + * + * \return Function that performs \p operation within \c pacemaker-based + */ +cib__op_fn_t +based_get_op_function(const cib__operation_t *operation) +{ + enum cib__op_type type = operation->type; + + CRM_ASSERT(type >= 0); + + if (type >= PCMK__NELEM(cib_op_functions)) { + return NULL; + } + return cib_op_functions[type]; +} diff --git a/daemons/based/based_remote.c b/daemons/based/based_remote.c index 38136d2..4aa41fa 100644 --- a/daemons/based/based_remote.c +++ b/daemons/based/based_remote.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -23,7 +23,9 @@ #include #include + #include +#include #include #include @@ -126,13 +128,13 @@ init_remote_listener(int port, gboolean encrypted) /* create server socket */ ssock = malloc(sizeof(int)); if(ssock == NULL) { - crm_perror(LOG_ERR, "Listener socket allocation failed"); + crm_err("Listener socket allocation failed: %s", pcmk_rc_str(errno)); return -1; } *ssock = socket(AF_INET, SOCK_STREAM, 0); if (*ssock == -1) { - crm_perror(LOG_ERR, "Listener socket creation failed"); + crm_err("Listener socket creation failed: %s", pcmk_rc_str(errno)); free(ssock); return -1; } @@ -141,8 +143,8 @@ init_remote_listener(int port, gboolean encrypted) optval = 1; rc = setsockopt(*ssock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { - crm_perror(LOG_WARNING, - "Local address reuse not allowed on listener socket"); + crm_err("Local address reuse not allowed on listener socket: %s", + pcmk_rc_str(errno)); } /* bind server socket */ @@ -151,13 +153,13 @@ init_remote_listener(int port, gboolean encrypted) saddr.sin_addr.s_addr = INADDR_ANY; saddr.sin_port = htons(port); if (bind(*ssock, (struct sockaddr *)&saddr, sizeof(saddr)) == -1) { - crm_perror(LOG_ERR, "Cannot bind to listener socket"); + crm_err("Cannot bind to listener socket: %s", pcmk_rc_str(errno)); close(*ssock); free(ssock); return -2; } if (listen(*ssock, 10) == -1) { - crm_perror(LOG_ERR, "Cannot listen on socket"); + crm_err("Cannot listen on socket: %s", pcmk_rc_str(errno)); close(*ssock); free(ssock); return -3; @@ -222,9 +224,9 @@ cib_remote_auth(xmlNode * login) return FALSE; } - tmp = crm_element_name(login); - if (!pcmk__str_eq(tmp, "cib_command", pcmk__str_casei)) { - crm_err("Wrong tag: %s", tmp); + if (!pcmk__xe_is(login, T_CIB_COMMAND)) { + crm_err("Unrecognizable message from remote client"); + crm_log_xml_info(login, "bad"); return FALSE; } @@ -296,7 +298,7 @@ cib_remote_listen(gpointer data) memset(&addr, 0, sizeof(addr)); csock = accept(ssock, (struct sockaddr *)&addr, &laddr); if (csock == -1) { - crm_perror(LOG_ERR, "Could not accept socket connection"); + crm_err("Could not accept socket connection: %s", pcmk_rc_str(errno)); return TRUE; } @@ -411,9 +413,8 @@ cib_handle_remote_msg(pcmk__client_t *client, xmlNode *command) { const char *value = NULL; - value = crm_element_name(command); - if (!pcmk__str_eq(value, "cib_command", pcmk__str_casei)) { - crm_log_xml_trace(command, "Bad command: "); + if (!pcmk__xe_is(command, T_CIB_COMMAND)) { + crm_log_xml_trace(command, "bad"); return; } diff --git a/daemons/based/based_transaction.c b/daemons/based/based_transaction.c new file mode 100644 index 0000000..89aea2e --- /dev/null +++ b/daemons/based/based_transaction.c @@ -0,0 +1,167 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. + */ + +#include + +#include +#include + +#include "pacemaker-based.h" + +/*! + * \internal + * \brief Create a string describing the source of a commit-transaction request + * + * \param[in] client CIB client + * \param[in] origin Host where the commit request originated + * + * \return String describing the request source + * + * \note The caller is responsible for freeing the return value using \c free(). 
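+ * \note Example output (illustrative values only, not taken from the code):
+ *       "client cibadmin (1a2b3c4d) on node1" when a client is known, or
+ *       just the origin host name otherwise.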
+ */ +char * +based_transaction_source_str(const pcmk__client_t *client, const char *origin) +{ + char *source = NULL; + + if (client != NULL) { + source = crm_strdup_printf("client %s (%s)%s%s", + pcmk__client_name(client), + pcmk__s(client->id, "unidentified"), + ((origin != NULL)? " on " : ""), + pcmk__s(origin, "")); + + } else { + source = strdup((origin != NULL)? origin : "unknown source"); + } + + CRM_ASSERT(source != NULL); + return source; +} + +/*! + * \internal + * \brief Process requests in a transaction + * + * Stop when a request fails or when all requests have been processed. + * + * \param[in,out] transaction Transaction to process + * \param[in] client CIB client + * \param[in] source String describing the commit request source + * + * \return Standard Pacemaker return code + */ +static int +process_transaction_requests(xmlNodePtr transaction, + const pcmk__client_t *client, const char *source) +{ + for (xmlNodePtr request = first_named_child(transaction, T_CIB_COMMAND); + request != NULL; request = crm_next_same_xml(request)) { + + const char *op = crm_element_value(request, F_CIB_OPERATION); + const char *host = crm_element_value(request, F_CIB_HOST); + const cib__operation_t *operation = NULL; + int rc = cib__get_operation(op, &operation); + + if (rc == pcmk_rc_ok) { + if (!pcmk_is_set(operation->flags, cib__op_attr_transaction) + || (host != NULL)) { + + rc = EOPNOTSUPP; + } else { + /* Commit-transaction is a privileged operation. If we reached + * this point, the request came from a privileged connection. + */ + rc = cib_process_request(request, TRUE, client); + rc = pcmk_legacy2rc(rc); + } + } + + if (rc != pcmk_rc_ok) { + crm_err("Aborting CIB transaction for %s due to failed %s request: " + "%s", + source, op, pcmk_rc_str(rc)); + crm_log_xml_info(request, "Failed request"); + return rc; + } + + crm_trace("Applied %s request to transaction working CIB for %s", + op, source); + crm_log_xml_trace(request, "Successful request"); + } + + return pcmk_rc_ok; +} + +/*! + * \internal + * \brief Commit a given CIB client's transaction to a working CIB copy + * + * \param[in] transaction Transaction to commit + * \param[in] client CIB client + * \param[in] origin Host where the commit request originated + * \param[in,out] result_cib Where to store result CIB + * + * \return Standard Pacemaker return code + * + * \note This function is expected to be called only by + * \p cib_process_commit_transaction(). + * \note \p result_cib is expected to be a copy of the current CIB as created by + * \p cib_perform_op(). + * \note The caller is responsible for activating and syncing \p result_cib on + * success, and for freeing it on failure. + */ +int +based_commit_transaction(xmlNodePtr transaction, const pcmk__client_t *client, + const char *origin, xmlNodePtr *result_cib) +{ + xmlNodePtr saved_cib = the_cib; + int rc = pcmk_rc_ok; + char *source = NULL; + + CRM_ASSERT(result_cib != NULL); + + CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION), + return pcmk_rc_no_transaction); + + /* *result_cib should be a copy of the_cib (created by cib_perform_op()). If + * not, make a copy now. Change tracking isn't strictly required here + * because: + * * Each request in the transaction will have changes tracked and ACLs + * checked if appropriate. + * * cib_perform_op() will infer changes for the commit request at the end. 
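/* Editorial sketch (not part of the patch): process_transaction_requests()
 * above walks the transaction's child request elements and aborts on the
 * first failure. A simplified, self-contained rendering of that control flow
 * using plain libxml2; the element names and helpers are illustrative
 * stand-ins, not Pacemaker API.
 */
#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/tree.h>

static int
apply_one_request(xmlNodePtr request)
{
    (void) request;     /* stand-in for cib_process_request(); always succeeds */
    return 0;
}

static int
apply_all_or_abort(xmlNodePtr transaction)
{
    for (xmlNodePtr child = transaction->children; child != NULL;
         child = child->next) {

        if ((child->type != XML_ELEMENT_NODE)
            || (strcmp((const char *) child->name, "cib_command") != 0)) {
            continue;   /* only request elements are processed */
        }
        if (apply_one_request(child) != 0) {
            return -1;  /* abort the whole transaction on the first failure */
        }
    }
    return 0;           /* every request applied successfully */
}

int
main(void)
{
    const char *xml = "<transaction>"
                      "<cib_command op=\"modify\"/><cib_command op=\"create\"/>"
                      "</transaction>";
    xmlDocPtr doc = xmlReadMemory(xml, (int) strlen(xml), "sketch.xml", NULL, 0);
    int rc = apply_all_or_abort(xmlDocGetRootElement(doc));

    printf("transaction %s\n", (rc == 0)? "committed" : "aborted");
    xmlFreeDoc(doc);
    return rc;
}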
+ */ + CRM_CHECK((*result_cib != NULL) && (*result_cib != the_cib), + *result_cib = copy_xml(the_cib)); + + source = based_transaction_source_str(client, origin); + crm_trace("Committing transaction for %s to working CIB", source); + + // Apply all changes to a working copy of the CIB + the_cib = *result_cib; + + rc = process_transaction_requests(transaction, client, origin); + + crm_trace("Transaction commit %s for %s", + ((rc == pcmk_rc_ok)? "succeeded" : "failed"), source); + + /* Some request types (for example, erase) may have freed the_cib (the + * working copy) and pointed it at a new XML object. In that case, it + * follows that *result_cib (the working copy) was freed. + * + * Point *result_cib at the updated working copy stored in the_cib. + */ + *result_cib = the_cib; + + // Point the_cib back to the unchanged original copy + the_cib = saved_cib; + + free(source); + return rc; +} diff --git a/daemons/based/based_transaction.h b/daemons/based/based_transaction.h new file mode 100644 index 0000000..9935c73 --- /dev/null +++ b/daemons/based/based_transaction.h @@ -0,0 +1,24 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef BASED_TRANSACTION__H +#define BASED_TRANSACTION__H + +#include + +#include + +char *based_transaction_source_str(const pcmk__client_t *client, + const char *origin); + +int based_commit_transaction(xmlNodePtr transaction, + const pcmk__client_t *client, + const char *origin, xmlNodePtr *result_cib); + +#endif // BASED_TRANSACTION__H diff --git a/daemons/based/pacemaker-based.c b/daemons/based/pacemaker-based.c index 129997e..5dd7938 100644 --- a/daemons/based/pacemaker-based.c +++ b/daemons/based/pacemaker-based.c @@ -16,7 +16,8 @@ #include #include -#include +#include +#include #include #include @@ -42,6 +43,7 @@ gchar *cib_root = NULL; static gboolean preserve_status = FALSE; gboolean cib_writes_enabled = TRUE; +gboolean stand_alone = FALSE; int remote_fd = 0; int remote_tls_fd = 0; @@ -49,8 +51,6 @@ int remote_tls_fd = 0; GHashTable *config_hash = NULL; GHashTable *local_notify_queue = NULL; -pcmk__output_t *logger_out = NULL; - static void cib_init(void); void cib_shutdown(int nsig); static bool startCib(const char *filename); @@ -197,15 +197,6 @@ main(int argc, char **argv) goto done; } - rc = pcmk__log_output_new(&logger_out); - if (rc != pcmk_rc_ok) { - exit_code = CRM_EX_ERROR; - g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - "Error creating output format log: %s", pcmk_rc_str(rc)); - goto done; - } - pcmk__output_set_log_level(logger_out, LOG_TRACE); - mainloop_add_signal(SIGTERM, cib_shutdown); mainloop_add_signal(SIGPIPE, cib_enable_writes); @@ -230,7 +221,7 @@ main(int argc, char **argv) goto done; } - if (crm_ipc_connect(old_instance)) { + if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) { /* IPC end-point already up */ crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); diff --git a/daemons/based/pacemaker-based.h b/daemons/based/pacemaker-based.h index 05e49b3..33c7642 100644 --- a/daemons/based/pacemaker-based.h +++ b/daemons/based/pacemaker-based.h @@ -18,6 +18,9 @@ #include #include +#include +#include + #include #include #include @@ -26,16 +29,19 @@ #include #include +#include "based_transaction.h" + #ifdef HAVE_GNUTLS_GNUTLS_H # include #endif +#define OUR_NODENAME (stand_alone? 
"localhost" : crm_cluster->uname) + // CIB-specific client flags enum cib_client_flags { // Notifications cib_notify_pre = (UINT64_C(1) << 0), cib_notify_post = (UINT64_C(1) << 1), - cib_notify_replace = (UINT64_C(1) << 2), cib_notify_confirm = (UINT64_C(1) << 3), cib_notify_diff = (UINT64_C(1) << 4), @@ -43,16 +49,6 @@ enum cib_client_flags { cib_is_daemon = (UINT64_C(1) << 12), }; -typedef struct cib_operation_s { - const char *operation; - gboolean modifies_cib; - gboolean needs_privileges; - int (*prepare) (xmlNode *, xmlNode **, const char **); - int (*cleanup) (int, xmlNode **, xmlNode **); - int (*fn) (const char *, int, const char *, xmlNode *, - xmlNode *, xmlNode *, xmlNode **, xmlNode **); -} cib_operation_t; - extern bool based_is_primary; extern GHashTable *config_hash; extern xmlNode *the_cib; @@ -67,7 +63,6 @@ extern gboolean stand_alone; extern gboolean cib_shutdown_flag; extern gchar *cib_root; extern int cib_status; -extern pcmk__output_t *logger_out; extern struct qb_ipcs_service_handlers ipc_ro_callbacks; extern struct qb_ipcs_service_handlers ipc_rw_callbacks; @@ -79,6 +74,8 @@ void cib_peer_callback(xmlNode *msg, void *private_data); void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode *op_request, pcmk__client_t *cib_client, gboolean privileged); +int cib_process_request(xmlNode *request, gboolean privileged, + const pcmk__client_t *cib_client); void cib_shutdown(int nsig); void terminate_cib(const char *caller, int fast); gboolean cib_legacy_mode(void); @@ -92,9 +89,9 @@ int cib_process_shutdown_req(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); -int cib_process_default(const char *op, int options, const char *section, - xmlNode *req, xmlNode *input, xmlNode *existing_cib, - xmlNode **result_cib, xmlNode **answer); +int cib_process_noop(const char *op, int options, const char *section, + xmlNode *req, xmlNode *input, xmlNode *existing_cib, + xmlNode **result_cib, xmlNode **answer); int cib_process_ping(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); @@ -121,25 +118,17 @@ int cib_process_upgrade_server(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); +int cib_process_commit_transaction(const char *op, int options, + const char *section, xmlNode *req, + xmlNode *input, xmlNode *existing_cib, + xmlNode **result_cib, xmlNode **answer); void send_sync_request(const char *host); int sync_our_cib(xmlNode *request, gboolean all); -xmlNode *cib_msg_copy(xmlNode *msg, gboolean with_data); -int cib_get_operation_id(const char *op, int *operation); -cib_op_t *cib_op_func(int call_type); -gboolean cib_op_modifies(int call_type); -int cib_op_prepare(int call_type, xmlNode *request, xmlNode **input, - const char **section); -int cib_op_cleanup(int call_type, int options, xmlNode **input, - xmlNode **output); -int cib_op_can_run(int call_type, int call_options, bool privileged); +cib__op_fn_t based_get_op_function(const cib__operation_t *operation); void cib_diff_notify(const char *op, int result, const char *call_id, const char *client_id, const char *client_name, const char *origin, xmlNode *update, xmlNode *diff); -void cib_replace_notify(const char *op, int result, const char *call_id, - const char *client_id, const char *client_name, - const char *origin, xmlNode *update, 
xmlNode *diff, - uint32_t change_section); static inline const char * cib_config_lookup(const char *opt) diff --git a/daemons/controld/Makefile.am b/daemons/controld/Makefile.am index 08be1ff..1312090 100644 --- a/daemons/controld/Makefile.am +++ b/daemons/controld/Makefile.am @@ -14,34 +14,20 @@ halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = pacemaker-controld -noinst_HEADERS = controld_alerts.h \ - controld_callbacks.h \ - controld_cib.h \ - controld_fencing.h \ - controld_fsa.h \ - controld_globals.h \ - controld_lrm.h \ - controld_membership.h \ - controld_messages.h \ - controld_metadata.h \ - controld_throttle.h \ - controld_timers.h \ - controld_transition.h \ - controld_utils.h \ - pacemaker-controld.h +noinst_HEADERS = $(wildcard *.h) pacemaker_controld_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_controld_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_controld_LDADD = $(top_builddir)/lib/fencing/libstonithd.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/cluster/libcrmcluster.la \ - $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/lrmd/liblrmd.la \ - $(CLUSTERLIBS) +pacemaker_controld_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +pacemaker_controld_LDADD += $(top_builddir)/lib/cib/libcib.la +pacemaker_controld_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la +pacemaker_controld_LDADD += $(top_builddir)/lib/fencing/libstonithd.la +pacemaker_controld_LDADD += $(top_builddir)/lib/cluster/libcrmcluster.la +pacemaker_controld_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la +pacemaker_controld_LDADD += $(top_builddir)/lib/services/libcrmservice.la +pacemaker_controld_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_controld_LDADD += $(CLUSTERLIBS) pacemaker_controld_SOURCES = pacemaker-controld.c \ controld_alerts.c \ @@ -79,9 +65,11 @@ endif CLEANFILES = $(man7_MANS) if BUILD_LEGACY_LINKS +.PHONY: install-exec-hook install-exec-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f crmd && $(LN_S) pacemaker-controld crmd +.PHONY: uninstall-hook uninstall-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f crmd endif diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c index d578adc..7078739 100644 --- a/daemons/controld/controld_callbacks.c +++ b/daemons/controld/controld_callbacks.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -107,6 +107,8 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d bool appeared = FALSE; bool is_remote = pcmk_is_set(node->flags, crm_remote_node); + controld_node_pending_timer(node); + /* The controller waits to receive some information from the membership * layer before declaring itself operational. If this is being called for a * cluster node, indicate that we have it. 
@@ -274,13 +276,14 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d if (down) { const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK); - if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) { + if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { /* tengine_stonith_callback() confirms fence actions */ crm_trace("Updating CIB %s fencer reported fencing of %s complete", (pcmk_is_set(down->flags, pcmk__graph_action_confirmed)? "after" : "before"), node->uname); - } else if (!appeared && pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) { + } else if (!appeared && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, + pcmk__str_casei)) { // Shutdown actions are immediately confirmed (i.e. no_wait) if (!is_remote) { @@ -342,6 +345,17 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d } } + if (!appeared && (type == crm_status_processes) + && (node->when_member > 1)) { + /* The node left CPG but is still a cluster member. Set its + * membership time to 1 to record it in the cluster state as a + * boolean, so we don't fence it due to node-pending-timeout. + */ + node->when_member = 1; + flags |= node_update_cluster; + controld_node_pending_timer(node); + } + /* Update the CIB node state */ update = create_node_state_update(node, flags, NULL, __func__); if (update == NULL) { diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c index 94b99dd..865e41f 100644 --- a/daemons/controld/controld_cib.c +++ b/daemons/controld/controld_cib.c @@ -22,90 +22,6 @@ // Call ID of the most recent in-progress CIB resource update (or 0 if none) static int pending_rsc_update = 0; -// Call IDs of requested CIB replacements that won't trigger a new election -// (used as a set of gint values) -static GHashTable *cib_replacements = NULL; - -/*! - * \internal - * \brief Store the call ID of a CIB replacement that the controller requested - * - * The \p do_cib_replaced() callback function will avoid triggering a new - * election when we're notified of one of these expected replacements. - * - * \param[in] call_id CIB call ID (or 0 for a synchronous call) - * - * \note This function should be called after making any asynchronous CIB - * request (or before making any synchronous CIB request) that may replace - * part of the nodes or status section. This may include CIB sync calls. - */ -void -controld_record_cib_replace_call(int call_id) -{ - CRM_CHECK(call_id >= 0, return); - - if (cib_replacements == NULL) { - cib_replacements = g_hash_table_new(NULL, NULL); - } - - /* If the call ID is already present in the table, then it's old. We may not - * be removing them properly, and we could improperly ignore replacement - * notifications if cib_t:call_id wraps around. - */ - CRM_LOG_ASSERT(g_hash_table_add(cib_replacements, - GINT_TO_POINTER((gint) call_id))); -} - -/*! - * \internal - * \brief Remove the call ID of a CIB replacement from the replacements table - * - * \param[in] call_id CIB call ID (or 0 for a synchronous call) - * - * \return \p true if \p call_id was found in the table, or \p false otherwise - * - * \note CIB notifications run before CIB callbacks. If this function is called - * from within a callback, \p do_cib_replaced() will have removed - * \p call_id from the table first if relevant changes triggered a - * notification. 
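/* Editorial sketch (not part of the patch): the cib_replacements table being
 * removed above uses a GHashTable as a set of integer call IDs, storing the
 * ints directly as pointers via GINT_TO_POINTER(). A minimal standalone
 * example of that GLib idiom; names are illustrative only.
 */
#include <glib.h>

int
main(void)
{
    /* NULL hash/equal functions mean direct pointer hashing, as above */
    GHashTable *expected_ids = g_hash_table_new(NULL, NULL);

    g_hash_table_add(expected_ids, GINT_TO_POINTER(42));    /* record call 42 */

    if (g_hash_table_remove(expected_ids, GINT_TO_POINTER(42))) {
        g_print("call 42 was expected; ignoring its notification\n");
    }

    g_hash_table_destroy(expected_ids);
    return 0;
}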
- */ -bool -controld_forget_cib_replace_call(int call_id) -{ - CRM_CHECK(call_id >= 0, return false); - - if (cib_replacements == NULL) { - return false; - } - return g_hash_table_remove(cib_replacements, - GINT_TO_POINTER((gint) call_id)); -} - -/*! - * \internal - * \brief Empty the hash table containing call IDs of CIB replacement requests - */ -void -controld_forget_all_cib_replace_calls(void) -{ - if (cib_replacements != NULL) { - g_hash_table_remove_all(cib_replacements); - } -} - -/*! - * \internal - * \brief Free the hash table containing call IDs of CIB replacement requests - */ -void -controld_destroy_cib_replacements_table(void) -{ - if (cib_replacements != NULL) { - g_hash_table_destroy(cib_replacements); - cib_replacements = NULL; - } -} - /*! * \internal * \brief Respond to a dropped CIB connection @@ -127,54 +43,54 @@ handle_cib_disconnect(gpointer user_data) controld_clear_fsa_input_flags(R_CIB_CONNECTED); } else { // Expected - crm_info("Connection to the CIB manager terminated"); + crm_info("Disconnected from the CIB manager"); } } static void do_cib_updated(const char *event, xmlNode * msg) { - if (pcmk__alert_in_patchset(msg, TRUE)) { - controld_trigger_config(); + const xmlNode *patchset = NULL; + const char *client_name = NULL; + + crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC)); + + if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) { + return; } -} -static void -do_cib_replaced(const char *event, xmlNode * msg) -{ - int call_id = 0; - const char *client_id = crm_element_value(msg, F_CIB_CLIENTID); - uint32_t change_section = cib_change_section_nodes - |cib_change_section_status; - long long value = 0; + if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS) + || cib__element_in_patchset(patchset, XML_CIB_TAG_CRMCONFIG)) { + + controld_trigger_config(); + } - crm_debug("Updating the CIB after a replace: DC=%s", pcmk__btoa(AM_I_DC)); if (!AM_I_DC) { + // We're not in control of the join sequence return; } - if ((crm_element_value_int(msg, F_CIB_CALLID, &call_id) == 0) - && pcmk__str_eq(client_id, controld_globals.cib_client_id, - pcmk__str_none) - && controld_forget_cib_replace_call(call_id)) { - // We requested this replace op. No need to restart the join. + client_name = crm_element_value(msg, F_CIB_CLIENTNAME); + if (!cib__client_triggers_refresh(client_name)) { + // The CIB is still accurate return; } - if ((crm_element_value_ll(msg, F_CIB_CHANGE_SECTION, &value) < 0) - || (value < 0) || (value > UINT32_MAX)) { + if (cib__element_in_patchset(patchset, XML_CIB_TAG_NODES) + || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) { - crm_trace("Couldn't parse '%s' from message", F_CIB_CHANGE_SECTION); - } else { - change_section = (uint32_t) value; - } - - if (pcmk_any_flags_set(change_section, cib_change_section_nodes - |cib_change_section_status)) { + /* An unsafe client modified the nodes or status section. Ensure the + * node list is up-to-date, and start the join process again so we get + * everyone's current resource history. 
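/* Editorial sketch (not part of the patch): the reworked do_cib_updated()
 * above reacts only to the CIB sections that actually appear in the diff
 * notification. A simplified, self-contained version of that decision logic;
 * the section_changed() helper is an illustrative stand-in for
 * cib__element_in_patchset(), and the section names are the usual CIB tags.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
section_changed(const char **changed, const char *name)
{
    for (int i = 0; changed[i] != NULL; i++) {
        if (strcmp(changed[i], name) == 0) {
            return true;
        }
    }
    return false;
}

int
main(void)
{
    const char *changed[] = { "crm_config", "status", NULL };

    if (section_changed(changed, "alerts")
        || section_changed(changed, "crm_config")) {
        printf("re-read controller configuration\n");
    }
    if (section_changed(changed, "nodes") || section_changed(changed, "status")) {
        printf("refresh node list and restart the join sequence (DC only)\n");
    }
    return 0;
}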
+ */ + if (client_name == NULL) { + client_name = crm_element_value(msg, F_CIB_CLIENTID); + } + crm_notice("Populating nodes and starting an election after %s event " + "triggered by %s", + event, pcmk__s(client_name, "(unidentified client)")); - /* start the join process again so we get everyone's LRM status */ populate_cib_nodes(node_update_quick|node_update_all, __func__); - register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); } } @@ -186,12 +102,10 @@ controld_disconnect_cib_manager(void) CRM_ASSERT(cib_conn != NULL); - crm_info("Disconnecting from the CIB manager"); + crm_debug("Disconnecting from the CIB manager"); controld_clear_fsa_input_flags(R_CIB_CONNECTED); - cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_REPLACE_NOTIFY, - do_cib_replaced); cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY, do_cib_updated); cib_free_callbacks(cib_conn); @@ -201,8 +115,6 @@ controld_disconnect_cib_manager(void) cib_scope_local|cib_discard_reply); cib_conn->cmds->signoff(cib_conn); } - - crm_notice("Disconnected from the CIB manager"); } /* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */ @@ -217,7 +129,6 @@ do_cib_control(long long action, cib_t *cib_conn = controld_globals.cib_conn; void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect; - void (*replace_cb) (const char *event, xmlNodePtr msg) = do_cib_replaced; void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated; int rc = pcmk_ok; @@ -263,11 +174,6 @@ do_cib_control(long long action, dnotify_fn) != pcmk_ok) { crm_err("Could not set dnotify callback"); - } else if (cib_conn->cmds->add_notify_callback(cib_conn, - T_CIB_REPLACE_NOTIFY, - replace_cb) != pcmk_ok) { - crm_err("Could not set CIB notification callback (replace)"); - } else if (cib_conn->cmds->add_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY, update_cb) != pcmk_ok) { @@ -276,8 +182,6 @@ do_cib_control(long long action, } else { controld_set_fsa_input_flags(R_CIB_CONNECTED); cib_retries = 0; - cib_conn->cmds->client_id(cib_conn, &controld_globals.cib_client_id, - NULL); } if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) { @@ -310,11 +214,12 @@ do_cib_control(long long action, unsigned int cib_op_timeout(void) { + // @COMPAT: Drop env_timeout at 3.0.0 static int env_timeout = -1; unsigned int calculated_timeout = 0; if (env_timeout == -1) { - const char *env = getenv("PCMK_cib_timeout"); + const char *env = pcmk__env_option(PCMK__ENV_CIB_TIMEOUT); pcmk__scan_min_int(env, &env_timeout, MIN_CIB_OP_TIMEOUT); crm_trace("Minimum CIB op timeout: %ds (environment: %s)", @@ -401,67 +306,87 @@ cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, /*! 
* \internal - * \brief Delete subsection of a node's CIB node_state + * \brief Get the XPath and description of a node state section to be deleted * - * \param[in] uname Desired node - * \param[in] section Subsection of node_state to delete - * \param[in] options CIB call options to use + * \param[in] uname Desired node + * \param[in] section Subsection of node_state to be deleted + * \param[out] xpath Where to store XPath of \p section + * \param[out] desc If not \c NULL, where to store description of \p section */ void -controld_delete_node_state(const char *uname, enum controld_section_e section, - int options) +controld_node_state_deletion_strings(const char *uname, + enum controld_section_e section, + char **xpath, char **desc) { - cib_t *cib_conn = controld_globals.cib_conn; - - char *xpath = NULL; - char *desc = NULL; + const char *desc_pre = NULL; // Shutdown locks that started before this time are expired long long expire = (long long) time(NULL) - controld_globals.shutdown_lock_limit; - CRM_CHECK(uname != NULL, return); switch (section) { case controld_section_lrm: - xpath = crm_strdup_printf(XPATH_NODE_LRM, uname); - desc = crm_strdup_printf("resource history for node %s", uname); + *xpath = crm_strdup_printf(XPATH_NODE_LRM, uname); + desc_pre = "resource history"; break; case controld_section_lrm_unlocked: - xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED, - uname, uname, expire); - desc = crm_strdup_printf("resource history (other than shutdown " - "locks) for node %s", uname); + *xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED, + uname, uname, expire); + desc_pre = "resource history (other than shutdown locks)"; break; case controld_section_attrs: - xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname); - desc = crm_strdup_printf("transient attributes for node %s", uname); + *xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname); + desc_pre = "transient attributes"; break; case controld_section_all: - xpath = crm_strdup_printf(XPATH_NODE_ALL, uname); - desc = crm_strdup_printf("all state for node %s", uname); + *xpath = crm_strdup_printf(XPATH_NODE_ALL, uname); + desc_pre = "all state"; break; case controld_section_all_unlocked: - xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED, - uname, uname, expire, uname); - desc = crm_strdup_printf("all state (other than shutdown locks) " - "for node %s", uname); + *xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED, + uname, uname, expire, uname); + desc_pre = "all state (other than shutdown locks)"; + break; + default: + // We called this function incorrectly + CRM_ASSERT(false); break; } - if (cib_conn == NULL) { - crm_warn("Unable to delete %s: no CIB connection", desc); - free(desc); - } else { - int call_id; - - cib__set_call_options(options, "node state deletion", - cib_xpath|cib_multiple); - call_id = cib_conn->cmds->remove(cib_conn, xpath, NULL, options); - crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s", - desc, call_id, xpath); - fsa_register_cib_callback(call_id, desc, cib_delete_callback); - // CIB library handles freeing desc + if (desc != NULL) { + *desc = crm_strdup_printf("%s for node %s", desc_pre, uname); } +} + +/*! 
+ * \internal + * \brief Delete subsection of a node's CIB node_state + * + * \param[in] uname Desired node + * \param[in] section Subsection of node_state to delete + * \param[in] options CIB call options to use + */ +void +controld_delete_node_state(const char *uname, enum controld_section_e section, + int options) +{ + cib_t *cib = controld_globals.cib_conn; + char *xpath = NULL; + char *desc = NULL; + int cib_rc = pcmk_ok; + + CRM_ASSERT((uname != NULL) && (cib != NULL)); + + controld_node_state_deletion_strings(uname, section, &xpath, &desc); + + cib__set_call_options(options, "node state deletion", + cib_xpath|cib_multiple); + cib_rc = cib->cmds->remove(cib, xpath, NULL, options); + fsa_register_cib_callback(cib_rc, desc, cib_delete_callback); + crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s", + desc, cib_rc, xpath); + + // CIB library handles freeing desc free(xpath); } @@ -491,11 +416,12 @@ controld_delete_resource_history(const char *rsc_id, const char *node, char *desc = NULL; char *xpath = NULL; int rc = pcmk_rc_ok; + cib_t *cib = controld_globals.cib_conn; CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL); desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node); - if (controld_globals.cib_conn == NULL) { + if (cib == NULL) { crm_err("Unable to clear %s: no CIB connection", desc); free(desc); return ENOTCONN; @@ -503,9 +429,10 @@ controld_delete_resource_history(const char *rsc_id, const char *node, // Ask CIB to delete the entry xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id); - rc = cib_internal_op(controld_globals.cib_conn, PCMK__CIB_REQUEST_DELETE, - NULL, xpath, NULL, NULL, call_options|cib_xpath, - user_name); + + cib->cmds->set_user(cib, user_name); + rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath); + cib->cmds->set_user(cib, NULL); if (rc < 0) { rc = pcmk_legacy2rc(rc); @@ -841,10 +768,17 @@ cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *use case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: - crm_trace("Resource update %d complete: rc=%d", call_id, rc); + crm_trace("Resource history update completed (call=%d rc=%d)", + call_id, rc); break; default: - crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); + if (call_id > 0) { + crm_warn("Resource history update %d failed: %s " + CRM_XS " rc=%d", call_id, pcmk_strerror(rc), rc); + } else { + crm_warn("Resource history update failed: %s " CRM_XS " rc=%d", + pcmk_strerror(rc), rc); + } } if (call_id == pending_rsc_update) { @@ -863,10 +797,11 @@ should_preserve_lock(lrmd_event_data_t *op) if (!pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { return false; } - if (!strcmp(op->op_type, RSC_STOP) && (op->rc == PCMK_OCF_OK)) { + if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) { return true; } - if (!strcmp(op->op_type, RSC_STATUS) && (op->rc == PCMK_OCF_NOT_RUNNING)) { + if (!strcmp(op->op_type, PCMK_ACTION_MONITOR) + && (op->rc == PCMK_OCF_NOT_RUNNING)) { return true; } return false; @@ -876,10 +811,10 @@ should_preserve_lock(lrmd_event_data_t *op) * \internal * \brief Request a CIB update * - * \param[in] section Section of CIB to update - * \param[in,out] data New XML of CIB section to update - * \param[in] options CIB call options - * \param[in] callback If not NULL, set this as the operation callback + * \param[in] section Section of CIB to update + * \param[in] data New XML of CIB section to update + * \param[in] options CIB call 
options + * \param[in] callback If not \c NULL, set this as the operation callback * * \return Standard Pacemaker return code * @@ -890,14 +825,13 @@ int controld_update_cib(const char *section, xmlNode *data, int options, void (*callback)(xmlNode *, int, int, xmlNode *, void *)) { + cib_t *cib = controld_globals.cib_conn; int cib_rc = -ENOTCONN; CRM_ASSERT(data != NULL); - if (controld_globals.cib_conn != NULL) { - cib_rc = cib_internal_op(controld_globals.cib_conn, - PCMK__CIB_REQUEST_MODIFY, NULL, section, - data, NULL, options, NULL); + if (cib != NULL) { + cib_rc = cib->cmds->modify(cib, section, data, options); if (cib_rc >= 0) { crm_debug("Submitted CIB update %d for %s section", cib_rc, section); @@ -1047,7 +981,6 @@ controld_delete_action_history(const lrmd_event_data_t *op) controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_none); - crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } @@ -1087,7 +1020,6 @@ controld_cib_delete_last_failure(const char *rsc_id, const char *node, { char *xpath = NULL; char *last_failure_key = NULL; - CRM_CHECK((rsc_id != NULL) && (node != NULL), return); // Generate XPath to match desired entry diff --git a/daemons/controld/controld_cib.h b/daemons/controld/controld_cib.h index bd9492a..dcc5a48 100644 --- a/daemons/controld/controld_cib.h +++ b/daemons/controld/controld_cib.h @@ -43,11 +43,6 @@ fsa_cib_anon_update_discard_reply(const char *section, xmlNode *data) { } } -void controld_record_cib_replace_call(int call_id); -bool controld_forget_cib_replace_call(int call_id); -void controld_forget_all_cib_replace_calls(void); -void controld_destroy_cib_replacements_table(void); - int controld_update_cib(const char *section, xmlNode *data, int options, void (*callback)(xmlNode *, int, int, xmlNode *, void *)); @@ -62,6 +57,9 @@ enum controld_section_e { controld_section_all_unlocked }; +void controld_node_state_deletion_strings(const char *uname, + enum controld_section_e section, + char **xpath, char **desc); void controld_delete_node_state(const char *uname, enum controld_section_e section, int options); int controld_delete_resource_history(const char *rsc_id, const char *node, @@ -118,8 +116,8 @@ int crmd_cib_smart_opt(void); static inline bool controld_action_is_recordable(const char *action) { - return !pcmk__str_any_of(action, CRMD_ACTION_CANCEL, CRMD_ACTION_DELETE, - CRMD_ACTION_NOTIFY, CRMD_ACTION_METADATA, NULL); + return !pcmk__str_any_of(action, PCMK_ACTION_CANCEL, PCMK_ACTION_DELETE, + PCMK_ACTION_NOTIFY, PCMK_ACTION_META_DATA, NULL); } #endif // PCMK__CONTROLD_CIB__H diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c index ffc62a0..644d686 100644 --- a/daemons/controld/controld_control.c +++ b/daemons/controld/controld_control.c @@ -221,6 +221,7 @@ crmd_exit(crm_exit_t exit_code) g_list_free(controld_globals.fsa_message_queue); controld_globals.fsa_message_queue = NULL; + controld_free_node_pending_timers(); controld_election_fini(); /* Tear down the CIB manager connection, but don't free it yet -- it could @@ -265,7 +266,6 @@ crmd_exit(crm_exit_t exit_code) controld_globals.te_uuid = NULL; free_max_generation(); - controld_destroy_cib_replacements_table(); controld_destroy_failed_sync_table(); controld_destroy_outside_events_table(); @@ -323,20 +323,12 @@ do_exit(long long action, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_exit_t exit_code = CRM_EX_OK; - int log_level = LOG_INFO; - const char 
*exit_type = "gracefully"; - if (action & A_EXIT_1) { - log_level = LOG_ERR; - exit_type = "forcefully"; + if (pcmk_is_set(action, A_EXIT_1)) { exit_code = CRM_EX_ERROR; + crm_err("Exiting now due to errors"); } - verify_stopped(cur_state, LOG_ERR); - do_crm_log(log_level, "Performing %s - %s exiting the controller", - fsa_action2string(action), exit_type); - - crm_info("[%s] stopped (%d)", crm_system_name, exit_code); crmd_exit(exit_code); } @@ -504,7 +496,8 @@ do_started(long long action, } else { crm_notice("Pacemaker controller successfully started and accepting connections"); } - controld_trigger_fencer_connect(); + controld_set_fsa_input_flags(R_ST_REQUIRED); + controld_timer_fencer_connect(GINT_TO_POINTER(TRUE)); controld_clear_fsa_input_flags(R_STARTING); register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL); @@ -684,6 +677,17 @@ static pcmk__cluster_option_t controller_options[] = { "passed since the shutdown was initiated, even if the node has not " "rejoined.") }, + { + XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL, + "0", pcmk__valid_interval_spec, + N_("How long to wait for a node that has joined the cluster to join " + "the controller process group"), + N_("Fence nodes that do not join the controller process group within " + "this much time after joining the cluster, to allow the cluster " + "to continue managing resources. A value of 0 means never fence " + "pending nodes. Setting the value to 2h means fence nodes after " + "2 hours.") + }, }; void @@ -722,9 +726,8 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void } crmconfig = output; - if ((crmconfig) && - (crm_element_name(crmconfig)) && - (strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) { + if ((crmconfig != NULL) + && !pcmk__xe_is(crmconfig, XML_CIB_TAG_CRMCONFIG)) { crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG); } if (!crmconfig) { @@ -761,6 +764,10 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void controld_globals.shutdown_lock_limit = crm_parse_interval_spec(value) / 1000; + value = g_hash_table_lookup(config_hash, + XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT); + controld_globals.node_pending_timeout = crm_parse_interval_spec(value) / 1000; + value = g_hash_table_lookup(config_hash, "cluster-name"); pcmk__str_update(&(controld_globals.cluster_name), value); diff --git a/daemons/controld/controld_corosync.c b/daemons/controld/controld_corosync.c index 4378b30..b69e821 100644 --- a/daemons/controld/controld_corosync.c +++ b/daemons/controld/controld_corosync.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -81,9 +81,6 @@ crmd_cs_destroy(gpointer user_data) if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) { crm_crit("Lost connection to cluster layer, shutting down"); crmd_exit(CRM_EX_DISCONNECT); - - } else { - crm_info("Corosync connection closed"); } } @@ -122,7 +119,8 @@ cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name, if (controld_globals.dc_name != NULL) { crm_node_t *peer = NULL; - peer = pcmk__search_cluster_node_cache(0, controld_globals.dc_name); + peer = pcmk__search_cluster_node_cache(0, controld_globals.dc_name, + NULL); if (peer != NULL) { for (int i = 0; i < left_list_entries; ++i) { if (left_list[i].nodeid == peer->id) { diff --git a/daemons/controld/controld_election.c b/daemons/controld/controld_election.c index 5f33d5b..70ffecc 100644 --- a/daemons/controld/controld_election.c +++ b/daemons/controld/controld_election.c @@ -263,13 +263,6 @@ do_dc_release(long long action, } else if (action & A_DC_RELEASED) { crm_info("DC role released"); -#if 0 - if (are there errors) { - /* we can't stay up if not healthy */ - /* or perhaps I_ERROR and go to S_RECOVER? */ - result = I_SHUTDOWN; - } -#endif if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { xmlNode *update = NULL; crm_node_t *node = crm_get_peer(0, controld_globals.our_nodename); diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 0de399c..480d37d 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -52,14 +52,10 @@ static void lrm_connection_destroy(void) { if (pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) { - crm_crit("Connection to executor failed"); + crm_crit("Lost connection to local executor"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); controld_clear_fsa_input_flags(R_LRM_CONNECTED); - - } else { - crm_info("Disconnected from executor"); } - } static char * @@ -171,7 +167,7 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ return; } - if (pcmk__str_eq(op->op_type, RSC_NOTIFY, pcmk__str_casei)) { + if (pcmk__str_eq(op->op_type, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { return; } @@ -222,10 +218,10 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ } entry->last = lrmd_copy_event(op); - if (op->params && pcmk__strcase_any_of(op->op_type, CRMD_ACTION_START, - CRMD_ACTION_RELOAD, - CRMD_ACTION_RELOAD_AGENT, - CRMD_ACTION_STATUS, NULL)) { + if (op->params && pcmk__strcase_any_of(op->op_type, PCMK_ACTION_START, + PCMK_ACTION_RELOAD, + PCMK_ACTION_RELOAD_AGENT, + PCMK_ACTION_MONITOR, NULL)) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } @@ -243,7 +239,9 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); - } else if (entry->recurring_op_list && !pcmk__str_eq(op->op_type, RSC_STATUS, pcmk__str_casei)) { + } else if ((entry->recurring_op_list != NULL) + && !pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); @@ -376,10 +374,8 @@ do_lrm_control(long long action, } controld_clear_fsa_input_flags(R_LRM_CONNECTED); - crm_info("Disconnecting from the executor"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); - 
crm_notice("Disconnected from the executor"); } if (action & A_LRM_CONNECT) { @@ -510,11 +506,14 @@ is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); - if (entry->last->rc == PCMK_OCF_OK && pcmk__str_eq(entry->last->op_type, CRMD_ACTION_STOP, pcmk__str_casei)) { + if ((entry->last->rc == PCMK_OCF_OK) + && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_STOP, + pcmk__str_casei)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK - && pcmk__str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE, pcmk__str_casei)) { + && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_MIGRATE_TO, + pcmk__str_casei)) { // A stricter check is too complex ... leave that to the scheduler return FALSE; @@ -668,7 +667,7 @@ notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_ crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? "" : " not")); - op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); + op = construct_op(lrm_state, input->xml, rsc_id, PCMK_ACTION_DELETE); controld_rc2event(op, pcmk_legacy2rc(rc)); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); @@ -1117,7 +1116,8 @@ synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action, op = construct_op(lrm_state, action, ID(xml_rsc), operation); - if (pcmk__str_eq(operation, RSC_NOTIFY, pcmk__str_casei)) { // Notifications can't fail + if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { + // Notifications can't fail fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_OK, NULL); } else { fake_op_status(lrm_state, op, op_status, rc, exit_reason); @@ -1329,7 +1329,7 @@ do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, if (cib_rc != pcmk_rc_ok) { lrmd_event_data_t *op = NULL; - op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); + op = construct_op(lrm_state, input->xml, rsc->id, PCMK_ACTION_DELETE); /* These are resource clean-ups, not actions, so no exit reason is * needed. 
@@ -1394,7 +1394,9 @@ metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data) md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc, result->action_stdout); } - do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); + if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) { + do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); + } free_metadata_cb_data(data); } @@ -1438,11 +1440,11 @@ do_lrm_invoke(long long action, from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } - if (pcmk__str_eq(crm_op, CRM_OP_LRM_DELETE, pcmk__str_none)) { + if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { crm_rsc_delete = TRUE; // from crm_resource } - operation = CRMD_ACTION_DELETE; + operation = PCMK_ACTION_DELETE; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); @@ -1486,7 +1488,7 @@ do_lrm_invoke(long long action, } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); - gboolean create_rsc = !pcmk__str_eq(operation, CRMD_ACTION_DELETE, + gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none); int rc; @@ -1534,12 +1536,13 @@ do_lrm_invoke(long long action, return; } - if (pcmk__str_eq(operation, CRMD_ACTION_CANCEL, pcmk__str_none)) { + if (pcmk__str_eq(operation, PCMK_ACTION_CANCEL, pcmk__str_none)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } - } else if (pcmk__str_eq(operation, CRMD_ACTION_DELETE, pcmk__str_none)) { + } else if (pcmk__str_eq(operation, PCMK_ACTION_DELETE, + pcmk__str_none)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); @@ -1554,7 +1557,7 @@ do_lrm_invoke(long long action, * changed (using something like inotify, or a hash or modification * time of the agent executable). 
*/ - if (strcmp(operation, CRMD_ACTION_START) != 0) { + if (strcmp(operation, PCMK_ACTION_START) != 0) { md = controld_get_rsc_metadata(lrm_state, rsc, controld_metadata_from_cache); } @@ -1619,7 +1622,8 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL); if (rsc_op == NULL) { - CRM_LOG_ASSERT(pcmk__str_eq(CRMD_ACTION_STOP, operation, pcmk__str_casei)); + CRM_LOG_ASSERT(pcmk__str_eq(operation, PCMK_ACTION_STOP, + pcmk__str_casei)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting @@ -1654,7 +1658,7 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, class = crm_element_value(primitive, XML_AGENT_ATTR_CLASS); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params) - && pcmk__str_eq(operation, CRMD_ACTION_STATUS, pcmk__str_casei) + && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei) && (op->interval_ms > 0)) { op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout"); @@ -1663,7 +1667,7 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, } } - if (!pcmk__str_eq(operation, RSC_STOP, pcmk__str_casei)) { + if (!pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)) { op->params = params; } else { @@ -1703,7 +1707,8 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, op->user_data = strdup(transition); if (op->interval_ms != 0) { - if (pcmk__strcase_any_of(operation, CRMD_ACTION_START, CRMD_ACTION_STOP, NULL)) { + if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP, + NULL)) { crm_err("Start and Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; @@ -1849,7 +1854,7 @@ static bool should_cancel_recurring(const char *rsc_id, const char *action, guint interval_ms) { if (is_remote_lrmd_ra(NULL, NULL, rsc_id) && (interval_ms == 0) - && (strcmp(action, CRMD_ACTION_MIGRATE) == 0)) { + && (strcmp(action, PCMK_ACTION_MIGRATE_TO) == 0)) { /* Don't stop monitoring a migrating Pacemaker Remote connection * resource until the entire migration has completed. We must detect if * the connection is unexpectedly severed, even during a migration. @@ -1859,8 +1864,8 @@ should_cancel_recurring(const char *rsc_id, const char *action, guint interval_m // Cancel recurring actions before changing resource state return (interval_ms == 0) - && !pcmk__str_any_of(action, CRMD_ACTION_STATUS, CRMD_ACTION_NOTIFY, - NULL); + && !pcmk__str_any_of(action, PCMK_ACTION_MONITOR, + PCMK_ACTION_NOTIFY, NULL); } /*! 
@@ -1876,7 +1881,7 @@ static const char * should_nack_action(const char *action) { if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN) - && pcmk__str_eq(action, RSC_START, pcmk__str_none)) { + && pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); return "Not attempting start due to shutdown in progress"; @@ -1888,7 +1893,7 @@ should_nack_action(const char *action) case S_TRANSITION_ENGINE: break; default: - if (!pcmk__str_eq(action, CRMD_ACTION_STOP, pcmk__str_none)) { + if (!pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) { return "Controller cannot attempt actions at this time"; } break; @@ -1930,8 +1935,8 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, return; } - if (pcmk__str_any_of(operation, CRMD_ACTION_RELOAD, - CRMD_ACTION_RELOAD_AGENT, NULL)) { + if (pcmk__str_any_of(operation, PCMK_ACTION_RELOAD, + PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs * will schedule reload-agent actions only. In either case, we need * to map that to whatever the resource agent actually supports. @@ -1939,9 +1944,9 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, */ if ((md != NULL) && pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) { - operation = CRMD_ACTION_RELOAD; + operation = PCMK_ACTION_RELOAD; } else { - operation = CRMD_ACTION_RELOAD_AGENT; + operation = PCMK_ACTION_RELOAD_AGENT; } } @@ -1968,8 +1973,9 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, /* now do the op */ crm_notice("Requesting local execution of %s operation for %s on %s " CRM_XS " transition_key=%s op_key=" PCMK__OP_FMT, - crm_action_str(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name, - pcmk__s(transition, ""), rsc->id, operation, op->interval_ms); + pcmk__readable_action(op->op_type, op->interval_ms), rsc->id, + lrm_state->node_name, pcmk__s(transition, ""), rsc->id, + operation, op->interval_ms); nack_reason = should_nack_action(operation); if (nack_reason != NULL) { @@ -2131,7 +2137,8 @@ log_executor_event(const lrmd_event_data_t *op, const char *op_key, GString *str = g_string_sized_new(100); // reasonable starting size pcmk__g_strcat(str, - "Result of ", crm_action_str(op->op_type, op->interval_ms), + "Result of ", + pcmk__readable_action(op->op_type, op->interval_ms), " operation for ", op->rsc_id, NULL); if (node_name != NULL) { @@ -2401,7 +2408,8 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, log_executor_event(op, op_key, node_name, removed); if (lrm_state) { - if (!pcmk__str_eq(op->op_type, RSC_METADATA, pcmk__str_casei)) { + if (!pcmk__str_eq(op->op_type, PCMK_ACTION_META_DATA, + pcmk__str_casei)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (rsc && (op->rc == PCMK_OCF_OK)) { char *metadata = unescape_newlines(op->output); diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c index 8c68bfc..b90cc5e 100644 --- a/daemons/controld/controld_execd_state.c +++ b/daemons/controld/controld_execd_state.c @@ -132,12 +132,6 @@ lrm_state_create(const char *node_name) return state; } -void -lrm_state_destroy(const char *node_name) -{ - g_hash_table_remove(lrm_state_table, node_name); -} - static gboolean remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) { @@ -307,7 +301,7 @@ lrm_state_destroy_all(void) lrm_state_t * lrm_state_find(const char *node_name) { - if (!node_name) { + if 
((node_name == NULL) || (lrm_state_table == NULL)) { return NULL; } return g_hash_table_lookup(lrm_state_table, node_name); @@ -318,6 +312,8 @@ lrm_state_find_or_create(const char *node_name) { lrm_state_t *lrm_state; + CRM_CHECK(lrm_state_table != NULL, return NULL); + lrm_state = g_hash_table_lookup(lrm_state_table, node_name); if (!lrm_state) { lrm_state = lrm_state_create(node_name); @@ -329,6 +325,9 @@ lrm_state_find_or_create(const char *node_name) GList * lrm_state_get_list(void) { + if (lrm_state_table == NULL) { + return NULL; + } return g_hash_table_get_values(lrm_state_table); } @@ -799,7 +798,7 @@ lrm_state_unregister_rsc(lrm_state_t * lrm_state, } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { - lrm_state_destroy(rsc_id); + g_hash_table_remove(lrm_state_table, rsc_id); return pcmk_ok; } diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c index 89cb61f..9557d9e 100644 --- a/daemons/controld/controld_fencing.c +++ b/daemons/controld/controld_fencing.c @@ -218,8 +218,11 @@ send_stonith_update(pcmk__graph_action_t *action, const char *target, CRM_CHECK(target != NULL, return); CRM_CHECK(uuid != NULL, return); - /* Make sure the membership and join caches are accurate */ - peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY); + /* Make sure the membership and join caches are accurate. + * Try getting any existing node cache entry also by node uuid in case it + * doesn't have an uname yet. + */ + peer = pcmk__get_peer_full(0, target, uuid, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return); @@ -391,7 +394,7 @@ execute_stonith_cleanup(void) */ static stonith_t *stonith_api = NULL; -static crm_trigger_t *stonith_reconnect = NULL; +static mainloop_timer_t *controld_fencer_connect_timer = NULL; static char *te_client_id = NULL; static gboolean @@ -422,7 +425,7 @@ fail_incompletable_stonith(pcmk__graph_t *graph) } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - if (task && pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) { + if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); last_action = action->xml; pcmk__update_graph(graph, action); @@ -447,11 +450,12 @@ tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e) te_cleanup_stonith_history_sync(st, FALSE); if (pcmk_is_set(controld_globals.fsa_input_register, R_ST_REQUIRED)) { - crm_crit("Fencing daemon connection failed"); - mainloop_set_trigger(stonith_reconnect); - + crm_err("Lost fencer connection (will attempt to reconnect)"); + if (!mainloop_timer_running(controld_fencer_connect_timer)) { + mainloop_timer_start(controld_fencer_connect_timer); + } } else { - crm_info("Fencing daemon disconnected"); + crm_info("Disconnected from fencer"); } if (stonith_api) { @@ -515,7 +519,7 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event) crmd_alert_fencing_op(event); - if (pcmk__str_eq("on", event->action, pcmk__str_none)) { + if (pcmk__str_eq(PCMK_ACTION_ON, event->action, pcmk__str_none)) { // Unfencing doesn't need special handling, just a log message if (succeeded) { crm_notice("%s was unfenced by %s at the request of %s@%s", @@ -647,14 +651,14 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event) /*! 
* \brief Connect to fencer * - * \param[in] user_data If NULL, retry failures now, otherwise retry in main loop + * \param[in] user_data If NULL, retry failures now, otherwise retry in mainloop timer * - * \return TRUE + * \return G_SOURCE_REMOVE on success, G_SOURCE_CONTINUE to retry * \note If user_data is NULL, this will wait 2s between attempts, for up to * 30 attempts, meaning the controller could be blocked as long as 58s. */ -static gboolean -te_connect_stonith(gpointer user_data) +gboolean +controld_timer_fencer_connect(gpointer user_data) { int rc = pcmk_ok; @@ -662,13 +666,13 @@ te_connect_stonith(gpointer user_data) stonith_api = stonith_api_new(); if (stonith_api == NULL) { crm_err("Could not connect to fencer: API memory allocation failed"); - return TRUE; + return G_SOURCE_REMOVE; } } if (stonith_api->state != stonith_disconnected) { crm_trace("Already connected to fencer, no need to retry"); - return TRUE; + return G_SOURCE_REMOVE; } if (user_data == NULL) { @@ -681,17 +685,30 @@ te_connect_stonith(gpointer user_data) } else { // Non-blocking (retry failures later in main loop) rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); + + if (controld_fencer_connect_timer == NULL) { + controld_fencer_connect_timer = + mainloop_timer_add("controld_fencer_connect", 1000, + TRUE, controld_timer_fencer_connect, + GINT_TO_POINTER(TRUE)); + } + if (rc != pcmk_ok) { if (pcmk_is_set(controld_globals.fsa_input_register, R_ST_REQUIRED)) { crm_notice("Fencer connection failed (will retry): %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); - mainloop_set_trigger(stonith_reconnect); + + if (!mainloop_timer_running(controld_fencer_connect_timer)) { + mainloop_timer_start(controld_fencer_connect_timer); + } + + return G_SOURCE_CONTINUE; } else { crm_info("Fencer connection failed (ignoring because no longer required): %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } - return TRUE; + return G_SOURCE_REMOVE; } } @@ -709,23 +726,7 @@ te_connect_stonith(gpointer user_data) crm_notice("Fencer successfully connected"); } - return TRUE; -} - -/*! - \internal - \brief Schedule fencer connection attempt in main loop -*/ -void -controld_trigger_fencer_connect(void) -{ - if (stonith_reconnect == NULL) { - stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW, - te_connect_stonith, - GINT_TO_POINTER(TRUE)); - } - controld_set_fsa_input_flags(R_ST_REQUIRED); - mainloop_set_trigger(stonith_reconnect); + return G_SOURCE_REMOVE; } void @@ -745,9 +746,9 @@ controld_disconnect_fencer(bool destroy) stonith_api->cmds->free(stonith_api); stonith_api = NULL; } - if (stonith_reconnect) { - mainloop_destroy_trigger(stonith_reconnect); - stonith_reconnect = NULL; + if (controld_fencer_connect_timer) { + mainloop_timer_del(controld_fencer_connect_timer); + controld_fencer_connect_timer = NULL; } if (te_client_id) { free(te_client_id); @@ -843,7 +844,7 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) crm_info("Fence operation %d for %s succeeded", data->call_id, target); if (!(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) { te_action_confirmed(action, NULL); - if (pcmk__str_eq("on", op, pcmk__str_casei)) { + if (pcmk__str_eq(PCMK_ACTION_ON, op, pcmk__str_casei)) { const char *value = NULL; char *now = pcmk__ttoa(time(NULL)); gboolean is_remote_node = FALSE; @@ -981,7 +982,7 @@ controld_execute_fence_action(pcmk__graph_t *graph, priority_delay ? priority_delay : ""); /* Passing NULL means block until we can connect... 
*/ - te_connect_stonith(NULL); + controld_timer_fencer_connect(NULL); pcmk__scan_min_int(priority_delay, &delay_i, 0); rc = fence_with_delay(target, type, delay_i); @@ -1000,12 +1001,14 @@ controld_execute_fence_action(pcmk__graph_t *graph, bool controld_verify_stonith_watchdog_timeout(const char *value) { + long st_timeout = value? crm_get_msec(value) : 0; const char *our_nodename = controld_globals.our_nodename; gboolean rv = TRUE; - if (stonith_api && (stonith_api->state != stonith_disconnected) && - stonith__watchdog_fencing_enabled_for_node_api(stonith_api, - our_nodename)) { + if (st_timeout == 0 + || (stonith_api && (stonith_api->state != stonith_disconnected) && + stonith__watchdog_fencing_enabled_for_node_api(stonith_api, + our_nodename))) { rv = pcmk__valid_sbd_timeout(value); } return rv; diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h index 86a5050..76779c6 100644 --- a/daemons/controld/controld_fencing.h +++ b/daemons/controld/controld_fencing.h @@ -19,7 +19,7 @@ void controld_configure_fencing(GHashTable *options); void st_fail_count_reset(const char * target); // stonith API client -void controld_trigger_fencer_connect(void); +gboolean controld_timer_fencer_connect(gpointer user_data); void controld_disconnect_fencer(bool destroy); int controld_execute_fence_action(pcmk__graph_t *graph, pcmk__graph_action_t *action); diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c index 622d1c8..06559b8 100644 --- a/daemons/controld/controld_fsa.c +++ b/daemons/controld/controld_fsa.c @@ -205,7 +205,6 @@ s_crmd_fsa(enum crmd_fsa_cause cause) fsa_data->data_type = fsa_dt_none; controld_globals.fsa_message_queue = g_list_append(controld_globals.fsa_message_queue, fsa_data); - fsa_data = NULL; } while ((controld_globals.fsa_message_queue != NULL) && !pcmk_is_set(controld_globals.flags, controld_fsa_is_stalled)) { @@ -275,7 +274,6 @@ s_crmd_fsa(enum crmd_fsa_cause cause) /* start doing things... */ s_crmd_fsa_actions(fsa_data); delete_fsa_input(fsa_data); - fsa_data = NULL; } if ((controld_globals.fsa_message_queue != NULL) @@ -620,11 +618,6 @@ do_state_transition(enum crmd_fsa_state cur_state, if (next_state != S_ELECTION && cur_state != S_RELEASE_DC) { controld_stop_current_election_timeout(); } -#if 0 - if ((controld_globals.fsa_input_register & R_SHUTDOWN)) { - controld_set_fsa_action_flags(A_DC_TIMER_STOP); - } -#endif if (next_state == S_INTEGRATION) { controld_set_fsa_action_flags(A_INTEGRATE_TIMER_START); } else { diff --git a/daemons/controld/controld_globals.h b/daemons/controld/controld_globals.h index eff1607..2ff8a57 100644 --- a/daemons/controld/controld_globals.h +++ b/daemons/controld/controld_globals.h @@ -45,9 +45,6 @@ typedef struct { //! Connection to the CIB cib_t *cib_conn; - //! CIB connection's client ID - const char *cib_client_id; - // Scheduler @@ -93,6 +90,9 @@ typedef struct { //! Max lifetime (in seconds) of a resource's shutdown lock to a node guint shutdown_lock_limit; + //! Node pending timeout + guint node_pending_timeout; + //! Main event loop GMainLoop *mainloop; } controld_globals_t; diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c index da6a9d6..805ecbd 100644 --- a/daemons/controld/controld_join_client.c +++ b/daemons/controld/controld_join_client.c @@ -112,15 +112,6 @@ do_cl_join_offer_respond(long long action, CRM_CHECK(input != NULL, return); -#if 0 - if (we are sick) { - log error; - - /* save the request for later? 
*/ - return; - } -#endif - welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM); join_id = crm_element_value(input->msg, F_CRM_JOIN_ID); crm_trace("Accepting cluster join offer from node %s "CRM_XS" join-%s", @@ -195,32 +186,34 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * free_xml(generation); } -static void -set_join_state(const char * start_state) +void +set_join_state(const char *start_state, const char *node_name, const char *node_uuid, + bool remote) { if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) { crm_notice("Forcing node %s to join in %s state per configured " - "environment", controld_globals.our_nodename, start_state); + "environment", node_name, start_state); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, controld_globals.our_uuid, - NULL, NULL, NULL, "standby", "on", NULL, NULL); + XML_CIB_TAG_NODES, node_uuid, + NULL, NULL, NULL, "standby", "on", NULL, + remote ? "remote" : NULL); } else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) { crm_notice("Forcing node %s to join in %s state per configured " - "environment", controld_globals.our_nodename, start_state); + "environment", node_name, start_state); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, controld_globals.our_uuid, - NULL, NULL, NULL, "standby", "off", NULL, NULL); + XML_CIB_TAG_NODES, node_uuid, + NULL, NULL, NULL, "standby", "off", NULL, + remote ? "remote" : NULL); } else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) { - crm_debug("Not forcing a starting state on node %s", - controld_globals.our_nodename); + crm_debug("Not forcing a starting state on node %s", node_name); } else { crm_warn("Unrecognized start state '%s', using 'default' (%s)", - start_state, controld_globals.our_nodename); + start_state, node_name); } } @@ -335,7 +328,8 @@ do_cl_join_finalize_respond(long long action, first_join = FALSE; if (start_state) { - set_join_state(start_state); + set_join_state(start_state, controld_globals.our_nodename, + controld_globals.our_uuid, false); } } diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c index f82b132..2fe6710 100644 --- a/daemons/controld/controld_join_dc.c +++ b/daemons/controld/controld_join_dc.c @@ -172,7 +172,6 @@ start_join_round(void) max_generation_xml = NULL; } controld_clear_fsa_input_flags(R_HAVE_CIB); - controld_forget_all_cib_replace_calls(); } /*! 
@@ -607,10 +606,6 @@ do_dc_join_finalize(long long action, rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn, sync_from, NULL, cib_none); - - if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { - controld_record_cib_replace_call(rc); - } fsa_register_cib_callback(rc, sync_from, finalize_sync_callback); } @@ -629,8 +624,6 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi { CRM_LOG_ASSERT(-EPERM != rc); - controld_forget_cib_replace_call(call_id); - if (rc != pcmk_ok) { const char *sync_from = (const char *) user_data; @@ -674,22 +667,25 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi } static void -join_update_complete_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) +join_node_state_commit_callback(xmlNode *msg, int call_id, int rc, + xmlNode *output, void *user_data) { - fsa_data_t *msg_data = NULL; + const char *node = user_data; - if (rc == pcmk_ok) { - crm_debug("join-%d node history update (via CIB call %d) complete", - current_join_id, call_id); - check_join_state(controld_globals.fsa_state, __func__); + if (rc != pcmk_ok) { + fsa_data_t *msg_data = NULL; // for register_fsa_error() macro - } else { - crm_err("join-%d node history update (via CIB call %d) failed: %s " - "(next transition may determine resource status incorrectly)", - current_join_id, call_id, pcmk_strerror(rc)); + crm_crit("join-%d node history update (via CIB call %d) for node %s " + "failed: %s", + current_join_id, call_id, node, pcmk_strerror(rc)); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } + + crm_debug("join-%d node history update (via CIB call %d) for node %s " + "complete", + current_join_id, call_id, node); + check_join_state(controld_globals.fsa_state, __func__); } /* A_DC_JOIN_PROCESS_ACK */ @@ -701,33 +697,39 @@ do_dc_join_ack(long long action, { int join_id = -1; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); - enum controld_section_e section = controld_section_lrm; - const int cib_opts = cib_scope_local|cib_can_create; const char *op = crm_element_value(join_ack->msg, F_CRM_TASK); - const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); + char *join_from = crm_element_value_copy(join_ack->msg, F_CRM_HOST_FROM); crm_node_t *peer = NULL; + enum controld_section_e section = controld_section_lrm; + char *xpath = NULL; + xmlNode *state = join_ack->xml; + xmlNode *execd_state = NULL; + + cib_t *cib = controld_globals.cib_conn; + int rc = pcmk_ok; + // Sanity checks if (join_from == NULL) { crm_warn("Ignoring message received without node identification"); - return; + goto done; } if (op == NULL) { crm_warn("Ignoring message received from %s without task", join_from); - return; + goto done; } if (strcmp(op, CRM_OP_JOIN_CONFIRM)) { crm_debug("Ignoring '%s' message from %s while waiting for '%s'", op, join_from, CRM_OP_JOIN_CONFIRM); - return; + goto done; } if (crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id) != 0) { crm_warn("Ignoring join confirmation from %s without valid join ID", join_from); - return; + goto done; } peer = crm_get_peer(0, join_from); @@ -736,7 +738,7 @@ do_dc_join_ack(long long action, "(currently %s not %s)", join_id, join_from, crm_join_phase_str(peer->join), crm_join_phase_str(crm_join_finalized)); - return; + goto done; } if (join_id != current_join_id) { @@ -744,40 +746,85 @@ do_dc_join_ack(long long action, "because currently on join-%d", join_id, join_from, 
current_join_id); crm_update_peer_join(__func__, peer, crm_join_nack); - return; + goto done; } crm_update_peer_join(__func__, peer, crm_join_confirmed); /* Update CIB with node's current executor state. A new transition will be - * triggered later, when the CIB notifies us of the change. + * triggered later, when the CIB manager notifies us of the change. + * + * The delete and modify requests are part of an atomic transaction. */ + rc = cib->cmds->init_transaction(cib); + if (rc != pcmk_ok) { + goto done; + } + + // Delete relevant parts of node's current executor state from CIB if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { section = controld_section_lrm_unlocked; } - controld_delete_node_state(join_from, section, cib_scope_local); + controld_node_state_deletion_strings(join_from, section, &xpath, NULL); + + rc = cib->cmds->remove(cib, xpath, NULL, + cib_scope_local + |cib_xpath + |cib_multiple + |cib_transaction); + if (rc != pcmk_ok) { + goto done; + } + + // Update CIB with node's latest known executor state if (pcmk__str_eq(join_from, controld_globals.our_nodename, pcmk__str_casei)) { - xmlNode *now_dc_lrmd_state = controld_query_executor_state(); - - if (now_dc_lrmd_state != NULL) { - crm_debug("Updating local node history for join-%d " - "from query result", join_id); - controld_update_cib(XML_CIB_TAG_STATUS, now_dc_lrmd_state, cib_opts, - join_update_complete_callback); - free_xml(now_dc_lrmd_state); + + // Use the latest possible state if processing our own join ack + execd_state = controld_query_executor_state(); + + if (execd_state != NULL) { + crm_debug("Updating local node history for join-%d from query " + "result", + current_join_id); + state = execd_state; + } else { crm_warn("Updating local node history from join-%d confirmation " - "because query failed", join_id); - controld_update_cib(XML_CIB_TAG_STATUS, join_ack->xml, cib_opts, - join_update_complete_callback); + "because query failed", + current_join_id); } + } else { crm_debug("Updating node history for %s from join-%d confirmation", - join_from, join_id); - controld_update_cib(XML_CIB_TAG_STATUS, join_ack->xml, cib_opts, - join_update_complete_callback); + join_from, current_join_id); + } + + rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, state, + cib_scope_local|cib_can_create|cib_transaction); + free_xml(execd_state); + if (rc != pcmk_ok) { + goto done; + } + + // Commit the transaction + rc = cib->cmds->end_transaction(cib, true, cib_scope_local); + fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback); + + if (rc > 0) { + // join_from will be freed after callback + join_from = NULL; + rc = pcmk_ok; + } + +done: + if (rc != pcmk_ok) { + crm_crit("join-%d node history update for node %s failed: %s", + current_join_id, join_from, pcmk_strerror(rc)); + register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } + free(join_from); + free(xpath); } void @@ -808,7 +855,7 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) */ crm_trace("Updating node name and UUID in CIB for %s", join_to); tmp1 = create_xml_node(NULL, XML_CIB_TAG_NODE); - set_uuid(tmp1, XML_ATTR_ID, join_node); + crm_xml_add(tmp1, XML_ATTR_ID, crm_peer_uuid(join_node)); crm_xml_add(tmp1, XML_ATTR_UNAME, join_to); fsa_cib_anon_update(XML_CIB_TAG_NODES, tmp1); free_xml(tmp1); diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h index 25f3db3..c3113e4 100644 --- a/daemons/controld/controld_lrm.h +++ b/daemons/controld/controld_lrm.h @@ -108,11 +108,6 @@ gboolean 
lrm_state_init_local(void); */ void lrm_state_destroy_all(void); -/*! - * \brief Destroy executor connection by node name - */ -void lrm_state_destroy(const char *node_name); - /*! * \brief Find lrm_state data by node name */ diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c index 1f7e4c0..f25d1e9 100644 --- a/daemons/controld/controld_membership.c +++ b/daemons/controld/controld_membership.c @@ -138,10 +138,8 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, pcmk__xe_set_bool_attr(node_state, XML_NODE_IS_REMOTE, true); } - set_uuid(node_state, XML_ATTR_ID, node); - - if (crm_element_value(node_state, XML_ATTR_ID) == NULL) { - crm_info("Node update for %s cancelled: no id", node->uname); + if (crm_xml_add(node_state, XML_ATTR_ID, crm_peer_uuid(node)) == NULL) { + crm_info("Node update for %s cancelled: no ID", node->uname); free_xml(node_state); return NULL; } @@ -149,17 +147,31 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, crm_xml_add(node_state, XML_ATTR_UNAME, node->uname); if ((flags & node_update_cluster) && node->state) { - pcmk__xe_set_bool_attr(node_state, XML_NODE_IN_CLUSTER, - pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei)); + if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { + // A value 0 means the node is not a cluster member. + crm_xml_add_ll(node_state, PCMK__XA_IN_CCM, node->when_member); + + } else { + pcmk__xe_set_bool_attr(node_state, PCMK__XA_IN_CCM, + pcmk__str_eq(node->state, CRM_NODE_MEMBER, + pcmk__str_casei)); + } } if (!pcmk_is_set(node->flags, crm_remote_node)) { if (flags & node_update_peer) { - value = OFFLINESTATUS; - if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { - value = ONLINESTATUS; + if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { + // A value 0 means the peer is offline in CPG. 
+ crm_xml_add_ll(node_state, PCMK__XA_CRMD, node->when_online); + + } else { + // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp + value = OFFLINESTATUS; + if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { + value = ONLINESTATUS; + } + crm_xml_add(node_state, PCMK__XA_CRMD, value); } - crm_xml_add(node_state, XML_NODE_IS_PEER, value); } if (flags & node_update_join) { @@ -168,11 +180,11 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, } else { value = CRMD_JOINSTATE_MEMBER; } - crm_xml_add(node_state, XML_NODE_JOIN_STATE, value); + crm_xml_add(node_state, PCMK__XA_JOIN, value); } if (flags & node_update_expected) { - crm_xml_add(node_state, XML_NODE_EXPECTED, node->expected); + crm_xml_add(node_state, PCMK__XA_EXPECTED, node->expected); } } @@ -210,7 +222,7 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, return; } - if (pcmk__str_eq(crm_element_name(output), XML_CIB_TAG_NODE, pcmk__str_casei)) { + if (pcmk__xe_is(output, XML_CIB_TAG_NODE)) { node_xml = output; } else { @@ -224,7 +236,7 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, crm_node_t *node = NULL; gboolean known = FALSE; - if (!pcmk__str_eq(crm_element_name(node_xml), XML_CIB_TAG_NODE, pcmk__str_casei)) { + if (!pcmk__xe_is(node_xml, XML_CIB_TAG_NODE)) { continue; } diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c index 54b27ec..39f3c7a 100644 --- a/daemons/controld/controld_messages.c +++ b/daemons/controld/controld_messages.c @@ -328,52 +328,80 @@ route_message(enum crmd_fsa_cause cause, xmlNode * input) gboolean relay_message(xmlNode * msg, gboolean originated_locally) { - int dest = 1; + enum crm_ais_msg_types dest = crm_msg_ais; bool is_for_dc = false; bool is_for_dcib = false; bool is_for_te = false; bool is_for_crm = false; bool is_for_cib = false; bool is_local = false; - const char *host_to = crm_element_value(msg, F_CRM_HOST_TO); - const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); - const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); - const char *type = crm_element_value(msg, F_TYPE); - const char *task = crm_element_value(msg, F_CRM_TASK); - const char *ref = crm_element_value(msg, XML_ATTR_REFERENCE); + bool broadcast = false; + const char *host_to = NULL; + const char *sys_to = NULL; + const char *sys_from = NULL; + const char *type = NULL; + const char *task = NULL; + const char *ref = NULL; + crm_node_t *node_to = NULL; + + CRM_CHECK(msg != NULL, return TRUE); + + host_to = crm_element_value(msg, F_CRM_HOST_TO); + sys_to = crm_element_value(msg, F_CRM_SYS_TO); + sys_from = crm_element_value(msg, F_CRM_SYS_FROM); + type = crm_element_value(msg, F_TYPE); + task = crm_element_value(msg, F_CRM_TASK); + ref = crm_element_value(msg, XML_ATTR_REFERENCE); + + broadcast = pcmk__str_empty(host_to); if (ref == NULL) { ref = "without reference ID"; } - if (msg == NULL) { - crm_warn("Cannot route empty message"); - return TRUE; - - } else if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) { - crm_trace("No routing needed for hello message %s", ref); + if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) { + crm_trace("Received hello %s from %s (no processing needed)", + ref, pcmk__s(sys_from, "unidentified source")); + crm_log_xml_trace(msg, "hello"); return TRUE; + } - } else if (!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) { - crm_warn("Received invalid message %s: type '%s' not '" T_CRM "'", + // Require message type (set by create_request()) + if 
(!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) { + crm_warn("Ignoring invalid message %s with type '%s' (not '" T_CRM "')", ref, pcmk__s(type, "")); - crm_log_xml_warn(msg, "[bad message type]"); + crm_log_xml_trace(msg, "ignored"); return TRUE; + } - } else if (sys_to == NULL) { - crm_warn("Received invalid message %s: no subsystem", ref); - crm_log_xml_warn(msg, "[no subsystem]"); + // Require a destination subsystem (also set by create_request()) + if (sys_to == NULL) { + crm_warn("Ignoring invalid message %s with no " F_CRM_SYS_TO, ref); + crm_log_xml_trace(msg, "ignored"); return TRUE; } + // Get the message type appropriate to the destination subsystem + if (is_corosync_cluster()) { + dest = text2msg_type(sys_to); + if ((dest < crm_msg_ais) || (dest > crm_msg_stonith_ng)) { + /* Unrecognized value, use a sane default + * + * @TODO Maybe we should bail instead + */ + dest = crm_msg_crmd; + } + } + is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0); is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0); is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0); is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0); is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0); + // Check whether message should be processed locally is_local = false; - if (pcmk__str_empty(host_to)) { + if (broadcast) { if (is_for_dc || is_for_te) { is_local = false; @@ -397,6 +425,7 @@ relay_message(xmlNode * msg, gboolean originated_locally) } else if (pcmk__str_eq(controld_globals.our_nodename, host_to, pcmk__str_casei)) { is_local = true; + } else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { xmlNode *msg_data = get_message_xml(msg, F_CRM_DATA); const char *mode = crm_element_value(msg_data, PCMK__XA_MODE); @@ -407,69 +436,68 @@ relay_message(xmlNode * msg, gboolean originated_locally) } } - if (is_for_dc || is_for_dcib || is_for_te) { - if (AM_I_DC && is_for_te) { - crm_trace("Route message %s locally as transition request", ref); - send_msg_via_ipc(msg, sys_to); + // Check whether message should be relayed - } else if (AM_I_DC) { + if (is_for_dc || is_for_dcib || is_for_te) { + if (AM_I_DC) { + if (is_for_te) { + crm_trace("Route message %s locally as transition request", + ref); + crm_log_xml_trace(msg, sys_to); + send_msg_via_ipc(msg, sys_to); + return TRUE; // No further processing of message is needed + } crm_trace("Route message %s locally as DC request", ref); return FALSE; // More to be done by caller + } - } else if (originated_locally && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE, - CRM_SYSTEM_TENGINE, NULL)) { - - if (is_corosync_cluster()) { - dest = text2msg_type(sys_to); + if (originated_locally + && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE, + CRM_SYSTEM_TENGINE, NULL)) { + crm_trace("Relay message %s to DC (via %s)", + ref, pcmk__s(host_to, "broadcast")); + crm_log_xml_trace(msg, "relayed"); + if (!broadcast) { + node_to = crm_get_peer(0, host_to); } - crm_trace("Relay message %s to DC", ref); - send_cluster_message(host_to ? crm_get_peer(0, host_to) : NULL, dest, msg, TRUE); - - } else { - /* Neither the TE nor the scheduler should be sending messages - * to DCs on other nodes. By definition, if we are no longer the DC, - * then the scheduler's or TE's data should be discarded. 
- */ - crm_trace("Discard message %s because we are not DC", ref); + send_cluster_message(node_to, dest, msg, TRUE); + return TRUE; } - } else if (is_local && (is_for_crm || is_for_cib)) { - crm_trace("Route message %s locally as controller request", ref); - return FALSE; // More to be done by caller - - } else if (is_local) { - crm_trace("Relay message %s locally to %s", - ref, (sys_to? sys_to : "unknown client")); - crm_log_xml_trace(msg, "[IPC relay]"); - send_msg_via_ipc(msg, sys_to); - - } else { - crm_node_t *node_to = NULL; - - if (is_corosync_cluster()) { - dest = text2msg_type(sys_to); + /* Transition engine and scheduler messages are sent only to the DC on + * the same node. If we are no longer the DC, discard this message. + */ + crm_trace("Ignoring message %s because we are no longer DC", ref); + crm_log_xml_trace(msg, "ignored"); + return TRUE; // No further processing of message is needed + } - if (dest == crm_msg_none || dest > crm_msg_stonith_ng) { - dest = crm_msg_crmd; - } + if (is_local) { + if (is_for_crm || is_for_cib) { + crm_trace("Route message %s locally as controller request", ref); + return FALSE; // More to be done by caller } + crm_trace("Relay message %s locally to %s", ref, sys_to); + crm_log_xml_trace(msg, "IPC-relay"); + send_msg_via_ipc(msg, sys_to); + return TRUE; + } - if (host_to) { - node_to = pcmk__search_cluster_node_cache(0, host_to); - if (node_to == NULL) { - crm_warn("Cannot route message %s: Unknown node %s", - ref, host_to); - return TRUE; - } - crm_trace("Relay message %s to %s", - ref, (node_to->uname? node_to->uname : "peer")); - } else { - crm_trace("Broadcast message %s to all peers", ref); + if (!broadcast) { + node_to = pcmk__search_cluster_node_cache(0, host_to, NULL); + if (node_to == NULL) { + crm_warn("Ignoring message %s because node %s is unknown", + ref, host_to); + crm_log_xml_trace(msg, "ignored"); + return TRUE; } - send_cluster_message(host_to ? node_to : NULL, dest, msg, TRUE); } - return TRUE; // No further processing of message is needed + crm_trace("Relay message %s to %s", + ref, pcmk__s(host_to, "all peers")); + crm_log_xml_trace(msg, "relayed"); + send_cluster_message(node_to, dest, msg, TRUE); + return TRUE; } // Return true if field contains a positive integer @@ -546,6 +574,7 @@ controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_c } crm_trace("Validated IPC hello from client %s", client_name); + crm_log_xml_trace(client_msg, "hello"); if (curr_client) { curr_client->userdata = strdup(client_name); } @@ -553,6 +582,7 @@ controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_c return false; rejected: + crm_log_xml_trace(client_msg, "rejected"); if (curr_client) { qb_ipcs_disconnect(curr_client->ipcs); } @@ -575,7 +605,9 @@ handle_message(xmlNode *msg, enum crmd_fsa_cause cause) return I_NULL; } - crm_err("Unknown message type: %s", type); + crm_warn("Ignoring message with unknown " F_CRM_MSG_TYPE " '%s'", + pcmk__s(type, "")); + crm_log_xml_trace(msg, "bad"); return I_NULL; } @@ -701,7 +733,7 @@ handle_lrm_delete(xmlNode *stored_msg) crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "local node"), rsc_id, ((rc == pcmk_rc_ok)? "" : " not")); - op = lrmd_new_event(rsc_id, CRMD_ACTION_DELETE, 0); + op = lrmd_new_event(rsc_id, PCMK_ACTION_DELETE, 0); op->type = lrmd_event_exec_complete; op->user_data = strdup(transition? 
transition : FAKE_TE_ID); op->params = pcmk__strkey_table(free, free); @@ -732,7 +764,7 @@ handle_remote_state(const xmlNode *msg) bool remote_is_up = false; int rc = pcmk_rc_ok; - rc = pcmk__xe_get_bool_attr(msg, XML_NODE_IN_CLUSTER, &remote_is_up); + rc = pcmk__xe_get_bool_attr(msg, PCMK__XA_IN_CCM, &remote_is_up); CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL); @@ -818,7 +850,7 @@ handle_node_list(const xmlNode *request) crm_xml_add_ll(xml, XML_ATTR_ID, (long long) node->id); // uint32_t crm_xml_add(xml, XML_ATTR_UNAME, node->uname); - crm_xml_add(xml, XML_NODE_IN_CLUSTER, node->state); + crm_xml_add(xml, PCMK__XA_IN_CCM, node->state); } // Create and send reply @@ -875,7 +907,7 @@ handle_node_info_request(const xmlNode *msg) if (node) { crm_xml_add(reply_data, XML_ATTR_ID, node->uuid); crm_xml_add(reply_data, XML_ATTR_UNAME, node->uname); - crm_xml_add(reply_data, XML_NODE_IS_PEER, node->state); + crm_xml_add(reply_data, PCMK__XA_CRMD, node->state); pcmk__xe_set_bool_attr(reply_data, XML_NODE_IS_REMOTE, pcmk_is_set(node->flags, crm_remote_node)); } @@ -988,14 +1020,15 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) /* Optimize this for the DC - it has the most to do */ + crm_log_xml_trace(stored_msg, "request"); if (op == NULL) { - crm_log_xml_warn(stored_msg, "[request without " F_CRM_TASK "]"); + crm_warn("Ignoring request without " F_CRM_TASK); return I_NULL; } if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { const char *from = crm_element_value(stored_msg, F_CRM_HOST_FROM); - crm_node_t *node = pcmk__search_cluster_node_cache(0, from); + crm_node_t *node = pcmk__search_cluster_node_cache(0, from, NULL); pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN); if(AM_I_DC == FALSE) { @@ -1062,11 +1095,6 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) if (controld_globals.fsa_state == S_HALT) { crm_debug("Forcing an election from S_HALT"); return I_ELECTION; -#if 0 - } else if (AM_I_DC) { - /* This is the old way of doing things but what is gained? */ - return I_ELECTION; -#endif } } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) { @@ -1157,8 +1185,9 @@ handle_response(xmlNode *stored_msg) { const char *op = crm_element_value(stored_msg, F_CRM_TASK); + crm_log_xml_trace(stored_msg, "reply"); if (op == NULL) { - crm_log_xml_err(stored_msg, "Bad message"); + crm_warn("Ignoring reply without " F_CRM_TASK); } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) { // Check whether scheduler answer been superseded by subsequent request @@ -1295,7 +1324,7 @@ broadcast_remote_state_message(const char *node_name, bool node_up) node_name, node_up? "coming up" : "going down"); crm_xml_add(msg, XML_ATTR_ID, node_name); - pcmk__xe_set_bool_attr(msg, XML_NODE_IN_CLUSTER, node_up); + pcmk__xe_set_bool_attr(msg, PCMK__XA_IN_CCM, node_up); if (node_up) { crm_xml_add(msg, PCMK__XA_CONN_HOST, controld_globals.our_nodename); diff --git a/daemons/controld/controld_metadata.c b/daemons/controld/controld_metadata.c index 240a978..c813ceb 100644 --- a/daemons/controld/controld_metadata.c +++ b/daemons/controld/controld_metadata.c @@ -1,5 +1,5 @@ /* - * Copyright 2017-2022 the Pacemaker project contributors + * Copyright 2017-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -172,7 +172,7 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc, const char *action_name = crm_element_value(match, "name"); - if (pcmk__str_eq(action_name, CRMD_ACTION_RELOAD_AGENT, + if (pcmk__str_eq(action_name, PCMK_ACTION_RELOAD_AGENT, pcmk__str_none)) { if (ocf1_1) { controld_set_ra_flags(md, key, ra_supports_reload_agent); @@ -181,7 +181,7 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc, "because it does not support OCF 1.1 or later", key); } - } else if (!ocf1_1 && pcmk__str_eq(action_name, CRMD_ACTION_RELOAD, + } else if (!ocf1_1 && pcmk__str_eq(action_name, PCMK_ACTION_RELOAD, pcmk__str_casei)) { controld_set_ra_flags(md, key, ra_supports_legacy_reload); } diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c index f24b755..d692ef6 100644 --- a/daemons/controld/controld_remote_ra.c +++ b/daemons/controld/controld_remote_ra.c @@ -280,6 +280,7 @@ remote_node_up(const char *node_name) int call_opt; xmlNode *update, *state; crm_node_t *node; + lrm_state_t *connection_rsc = NULL; CRM_CHECK(node_name != NULL, return); crm_info("Announcing Pacemaker Remote node %s", node_name); @@ -301,6 +302,20 @@ remote_node_up(const char *node_name) purge_remote_node_attrs(call_opt, node); pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); + /* Apply any start state that we were given from the environment on the + * remote node. + */ + connection_rsc = lrm_state_find(node->uname); + + if (connection_rsc != NULL) { + lrmd_t *lrm = connection_rsc->conn; + const char *start_state = lrmd__node_start_state(lrm); + + if (start_state) { + set_join_state(start_state, node->uname, node->uuid, true); + } + } + /* pacemaker_remote nodes don't participate in the membership layer, * so cluster nodes don't automatically get notified when they come and go. * We send a cluster message to the DC, and update the CIB node state entry, @@ -392,10 +407,11 @@ check_remote_node_state(const remote_ra_cmd_t *cmd) return; } - if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { remote_node_up(cmd->rsc_id); - } else if (pcmk__str_eq(cmd->action, "migrate_from", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM, + pcmk__str_casei)) { /* After a successful migration, we don't need to do remote_node_up() * because the DC already knows the node is up, and we don't want to * clear LRM history etc. We do need to add the remote node to this @@ -408,7 +424,7 @@ check_remote_node_state(const remote_ra_cmd_t *cmd) CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); - } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id); remote_ra_data_t *ra_data = lrm_state? 
lrm_state->remote_ra_data : NULL; @@ -510,7 +526,8 @@ retry_start_cmd_cb(gpointer data) return FALSE; } cmd = ra_data->cur_cmd; - if (!pcmk__strcase_any_of(cmd->action, "start", "migrate_from", NULL)) { + if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, + PCMK_ACTION_MIGRATE_FROM, NULL)) { return FALSE; } update_remaining_timeout(cmd); @@ -681,7 +698,8 @@ remote_lrm_op_callback(lrmd_event_data_t * op) handle_remote_ra_stop(lrm_state, NULL); remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM); /* now fake the reply of a successful 'stop' */ - synthesize_lrmd_success(NULL, lrm_state->node_name, "stop"); + synthesize_lrmd_success(NULL, lrm_state->node_name, + PCMK_ACTION_STOP); } return; } @@ -695,8 +713,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op) /* Start actions and migrate from actions complete after connection * comes back to us. */ - if (op->type == lrmd_event_connect && pcmk__strcase_any_of(cmd->action, "start", - "migrate_from", NULL)) { + if ((op->type == lrmd_event_connect) + && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, + PCMK_ACTION_MIGRATE_FROM, NULL)) { if (op->connection_rc < 0) { update_remaining_timeout(cmd); @@ -731,7 +750,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op) report_remote_ra_result(cmd); cmd_handled = TRUE; - } else if (op->type == lrmd_event_poke && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + } else if ((op->type == lrmd_event_poke) + && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); @@ -758,7 +779,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op) } cmd_handled = TRUE; - } else if (op->type == lrmd_event_disconnect && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + } else if ((op->type == lrmd_event_disconnect) + && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { if (pcmk_is_set(ra_data->status, remote_active) && !pcmk_is_set(cmd->status, cmd_cancel)) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, @@ -771,7 +794,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op) } cmd_handled = TRUE; - } else if (op->type == lrmd_event_new_client && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + } else if ((op->type == lrmd_event_new_client) + && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, + pcmk__str_casei)) { handle_remote_ra_stop(lrm_state, cmd); cmd_handled = TRUE; @@ -882,7 +907,8 @@ handle_remote_ra_exec(gpointer user_data) ra_data->cmds = g_list_remove_link(ra_data->cmds, first); g_list_free_1(first); - if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) { + if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START, + PCMK_ACTION_MIGRATE_FROM, NULL)) { lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete); if (handle_remote_ra_start(lrm_state, cmd, cmd->timeout) == pcmk_rc_ok) { @@ -894,7 +920,7 @@ handle_remote_ra_exec(gpointer user_data) } report_remote_ra_result(cmd); - } else if (!strcmp(cmd->action, "monitor")) { + } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) { if (lrm_state_is_connected(lrm_state) == TRUE) { rc = lrm_state_poke_connection(lrm_state); @@ -917,7 +943,7 @@ handle_remote_ra_exec(gpointer user_data) } report_remote_ra_result(cmd); - } else if (!strcmp(cmd->action, "stop")) { + } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) { if (pcmk_is_set(ra_data->status, expect_takeover)) { /* briefly wait on stop for the takeover event to occur. 
If the @@ -933,13 +959,14 @@ handle_remote_ra_exec(gpointer user_data) handle_remote_ra_stop(lrm_state, cmd); - } else if (!strcmp(cmd->action, "migrate_to")) { + } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) { lrm_remote_clear_flags(lrm_state, takeover_complete); lrm_remote_set_flags(lrm_state, expect_takeover); pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); report_remote_ra_result(cmd); - } else if (pcmk__str_any_of(cmd->action, CRMD_ACTION_RELOAD, - CRMD_ACTION_RELOAD_AGENT, NULL)) { + + } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD, + PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Currently the only reloadable parameter is reconnect_interval, * which is only used by the scheduler via the CIB, so reloads are a * no-op. @@ -1029,13 +1056,13 @@ static gboolean is_remote_ra_supported_action(const char *action) { return pcmk__str_any_of(action, - CRMD_ACTION_START, - CRMD_ACTION_STOP, - CRMD_ACTION_STATUS, - CRMD_ACTION_MIGRATE, - CRMD_ACTION_MIGRATED, - CRMD_ACTION_RELOAD_AGENT, - CRMD_ACTION_RELOAD, + PCMK_ACTION_START, + PCMK_ACTION_STOP, + PCMK_ACTION_MONITOR, + PCMK_ACTION_MIGRATE_TO, + PCMK_ACTION_MIGRATE_FROM, + PCMK_ACTION_RELOAD_AGENT, + PCMK_ACTION_RELOAD, NULL); } @@ -1048,7 +1075,9 @@ fail_all_monitor_cmds(GList * list) for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; - if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + if ((cmd->interval_ms > 0) + && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { rm_list = g_list_append(rm_list, cmd); } } @@ -1137,8 +1166,9 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, if (ra_data->cur_cmd && !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) && - (ra_data->cur_cmd->interval_ms == interval_ms) && - pcmk__str_eq(ra_data->cur_cmd->action, "monitor", pcmk__str_casei)) { + (ra_data->cur_cmd->interval_ms == interval_ms) + && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { cmd = ra_data->cur_cmd; goto handle_dup; @@ -1147,7 +1177,8 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) - && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { goto handle_dup; } } @@ -1155,7 +1186,8 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if ((cmd->interval_ms == interval_ms) - && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { goto handle_dup; } } @@ -1165,7 +1197,7 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms, handle_dup: crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT, - cmd->rsc_id, "monitor", interval_ms); + cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms); /* update the userdata */ if (userdata) { @@ -1385,7 +1417,7 @@ remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) } #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \ - "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \ + "[@" XML_LRM_ATTR_TASK "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \ XML_GRAPH_TAG_MAINTENANCE /*! 
@@ -1403,9 +1435,10 @@ remote_ra_process_maintenance_nodes(xmlNode *xml) xmlNode *node; int cnt = 0, cnt_remote = 0; - for (node = - first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE); - node != NULL; node = pcmk__xml_next(node)) { + for (node = first_named_child(getXpathResult(search, 0), + XML_CIB_TAG_NODE); + node != NULL; node = crm_next_same_xml(node)) { + lrm_state_t *lrm_state = lrm_state_find(ID(node)); cnt++; diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c index 912f9a5..8aca83f 100644 --- a/daemons/controld/controld_schedulerd.c +++ b/daemons/controld/controld_schedulerd.c @@ -45,11 +45,11 @@ controld_shutdown_schedulerd_ipc(void) * \internal * \brief Save CIB query result to file, raising FSA error * - * \param[in] msg Ignored - * \param[in] call_id Call ID of CIB query - * \param[in] rc Return code of CIB query - * \param[in,out] output Result of CIB query - * \param[in] user_data Unique identifier for filename + * \param[in] msg Ignored + * \param[in] call_id Call ID of CIB query + * \param[in] rc Return code of CIB query + * \param[in] output Result of CIB query + * \param[in] user_data Unique identifier for filename * * \note This is intended to be called after a scheduler connection fails. */ @@ -90,8 +90,9 @@ handle_disconnect(void) int rc = pcmk_ok; char *uuid_str = crm_generate_uuid(); - crm_crit("Connection to the scheduler failed " - CRM_XS " uuid=%s", uuid_str); + crm_crit("Lost connection to the scheduler " + CRM_XS " CIB will be saved to " PE_STATE_DIR "/pe-core-%s.bz2", + uuid_str); /* * The scheduler died... @@ -107,9 +108,6 @@ handle_disconnect(void) NULL, NULL, cib_scope_local); fsa_register_cib_callback(rc, uuid_str, save_cib_contents); - - } else { - crm_info("Connection to the scheduler released"); } controld_clear_fsa_input_flags(R_PE_CONNECTED); @@ -199,9 +197,10 @@ new_schedulerd_ipc_connection(void) pcmk_register_ipc_callback(schedulerd_api, scheduler_event_callback, NULL); - rc = pcmk_connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main); + rc = pcmk__connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main, 3); if (rc != pcmk_rc_ok) { - crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc)); + crm_err("Error connecting to %s: %s", + pcmk_ipc_name(schedulerd_api, true), pcmk_rc_str(rc)); return false; } diff --git a/daemons/controld/controld_te_actions.c b/daemons/controld/controld_te_actions.c index d8cfcad..fe6b744 100644 --- a/daemons/controld/controld_te_actions.c +++ b/daemons/controld/controld_te_actions.c @@ -47,7 +47,7 @@ execute_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *pseudo) const char *task = crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK); /* send to peers as well? */ - if (pcmk__str_eq(task, CRM_OP_MAINTENANCE_NODES, pcmk__str_casei)) { + if (pcmk__str_eq(task, PCMK_ACTION_MAINTENANCE_NODES, pcmk__str_casei)) { GHashTableIter iter; crm_node_t *node = NULL; @@ -125,7 +125,7 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); if (router_node == NULL) { router_node = on_node; - if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_none)) { + if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { const char *mode = crm_element_value(action->xml, PCMK__XA_MODE); if (pcmk__str_eq(mode, XML_TAG_CIB, pcmk__str_none)) { @@ -148,7 +148,8 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) id, task, on_node, (is_local? 
" locally" : ""), (no_wait? " without waiting" : "")); - if (is_local && pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_none)) { + if (is_local + && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) { /* defer until everything else completes */ crm_info("Controller request '%s' is a local shutdown", id); graph->completion_action = pcmk__graph_shutdown; @@ -156,7 +157,7 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) te_action_confirmed(action, graph); return pcmk_rc_ok; - } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_none)) { + } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) { crm_node_t *peer = crm_get_peer(0, router_node); pcmk__update_peer_expected(__func__, peer, CRMD_JOINSTATE_DOWN); @@ -318,7 +319,7 @@ controld_record_action_timeout(pcmk__graph_action_t *action) int target_rc = get_target_rc(action); crm_warn("%s %d: %s on %s timed out", - crm_element_name(action->xml), action->id, task_uuid, target); + action->xml->name, action->id, task_uuid, target); op = synthesize_timeout_event(action, target_rc); controld_record_action_event(action, op); @@ -528,9 +529,9 @@ te_update_job_count(pcmk__graph_action_t *action, int offset) * the connection resources */ target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); - if ((target == NULL) && pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE, - CRMD_ACTION_MIGRATED, NULL)) { - + if ((target == NULL) + && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO, + PCMK_ACTION_MIGRATE_FROM, NULL)) { const char *t1 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE); const char *t2 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET); @@ -586,7 +587,8 @@ allowed_on_node(const pcmk__graph_t *graph, const pcmk__graph_action_t *action, return false; } else if(graph->migration_limit > 0 && r->migrate_jobs >= graph->migration_limit) { - if (pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { + if (pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO, + PCMK_ACTION_MIGRATE_FROM, NULL)) { crm_trace("Peer %s is over their migration job limit of %d (%d): deferring %s", target, graph->migration_limit, r->migrate_jobs, id); return false; @@ -624,8 +626,9 @@ graph_action_allowed(pcmk__graph_t *graph, pcmk__graph_action_t *action) * the connection resources */ target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); - if ((target == NULL) && pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE, - CRMD_ACTION_MIGRATED, NULL)) { + if ((target == NULL) + && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO, + PCMK_ACTION_MIGRATE_FROM, NULL)) { target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE); if (!allowed_on_node(graph, action, target)) { return false; diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c index cf9de83..c26e757 100644 --- a/daemons/controld/controld_te_callbacks.c +++ b/daemons/controld/controld_te_callbacks.c @@ -225,12 +225,12 @@ process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, return; } - if (strcmp(TYPE(xml), XML_CIB_TAG_LRM) == 0) { + if (pcmk__xe_is(xml, XML_CIB_TAG_LRM)) { xml = first_named_child(xml, XML_LRM_TAG_RESOURCES); CRM_CHECK(xml != NULL, return); } - CRM_CHECK(strcmp(TYPE(xml), XML_LRM_TAG_RESOURCES) == 0, return); + CRM_CHECK(pcmk__xe_is(xml, XML_LRM_TAG_RESOURCES), return); /* * Updates by, or in response to, TE actions will never contain updates @@ -558,7 +558,7 @@ te_update_diff(const char *event, xmlNode * msg) 
p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2], fsa_state2string(controld_globals.fsa_state)); - crm_element_value_int(diff, "format", &format); + crm_element_value_int(diff, PCMK_XA_FORMAT, &format); switch (format) { case 1: te_update_diff_v1(event, diff); diff --git a/daemons/controld/controld_te_events.c b/daemons/controld/controld_te_events.c index d4e2b0f..28977c0 100644 --- a/daemons/controld/controld_te_events.c +++ b/daemons/controld/controld_te_events.c @@ -111,7 +111,7 @@ fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node) } else if (action->type == pcmk__cluster_graph_action) { const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) { + if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { continue; } } @@ -196,16 +196,16 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, /* Decide whether update is necessary and what value to use */ if ((interval_ms > 0) - || pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_none) - || pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_none)) { + || pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_none) + || pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_none)) { do_update = TRUE; - } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_none)) { + } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_start_offset, CRM_INFINITY_S); - } else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_none)) { + } else if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_stop_offset, CRM_INFINITY_S); @@ -314,7 +314,7 @@ get_cancel_action(const char *id, const char *node) pcmk__graph_action_t *action = (pcmk__graph_action_t *) gIter2->data; task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - if (!pcmk__str_eq(CRMD_ACTION_CANCEL, task, pcmk__str_casei)) { + if (!pcmk__str_eq(PCMK_ACTION_CANCEL, task, pcmk__str_casei)) { continue; } diff --git a/daemons/controld/controld_te_utils.c b/daemons/controld/controld_te_utils.c index ecbc0b2..5a9f029 100644 --- a/daemons/controld/controld_te_utils.c +++ b/daemons/controld/controld_te_utils.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -17,6 +17,8 @@ //! 
Triggers transition graph processing static crm_trigger_t *transition_trigger = NULL; +static GHashTable *node_pending_timers = NULL; + gboolean stop_te_timer(pcmk__graph_action_t *action) { @@ -132,11 +134,13 @@ static struct abort_timer_s { static gboolean abort_timer_popped(gpointer data) { - if (AM_I_DC && (abort_timer.aborted == FALSE)) { - abort_transition(abort_timer.priority, abort_timer.action, - abort_timer.text, NULL); + struct abort_timer_s *abort_timer = (struct abort_timer_s *) data; + + if (AM_I_DC && (abort_timer->aborted == FALSE)) { + abort_transition(abort_timer->priority, abort_timer->action, + abort_timer->text, NULL); } - abort_timer.id = 0; + abort_timer->id = 0; return FALSE; // do not immediately reschedule timer } @@ -158,7 +162,143 @@ abort_after_delay(int abort_priority, enum pcmk__graph_next abort_action, abort_timer.priority = abort_priority; abort_timer.action = abort_action; abort_timer.text = abort_text; - abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, NULL); + abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, &abort_timer); +} + +static void +free_node_pending_timer(gpointer data) +{ + struct abort_timer_s *node_pending_timer = (struct abort_timer_s *) data; + + if (node_pending_timer->id != 0) { + g_source_remove(node_pending_timer->id); + node_pending_timer->id = 0; + } + + free(node_pending_timer); +} + +static gboolean +node_pending_timer_popped(gpointer key) +{ + struct abort_timer_s *node_pending_timer = NULL; + + if (node_pending_timers == NULL) { + return FALSE; + } + + node_pending_timer = g_hash_table_lookup(node_pending_timers, key); + if (node_pending_timer == NULL) { + return FALSE; + } + + crm_warn("Node with id '%s' pending timed out (%us) on joining the process " + "group", + (const char *) key, controld_globals.node_pending_timeout); + + if (controld_globals.node_pending_timeout > 0) { + abort_timer_popped(node_pending_timer); + } + + g_hash_table_remove(node_pending_timers, key); + + return FALSE; // do not reschedule timer +} + +static void +init_node_pending_timer(const crm_node_t *node, guint timeout) +{ + struct abort_timer_s *node_pending_timer = NULL; + char *key = NULL; + + if (node->uuid == NULL) { + return; + } + + if (node_pending_timers == NULL) { + node_pending_timers = pcmk__strikey_table(free, + free_node_pending_timer); + + // The timer is somehow already existing + } else if (g_hash_table_lookup(node_pending_timers, node->uuid) != NULL) { + return; + } + + crm_notice("Waiting for pending %s with id '%s' to join the process " + "group (timeout=%us)", + node->uname ? 
node->uname : "node", node->uuid, + controld_globals.node_pending_timeout); + + node_pending_timer = calloc(1, sizeof(struct abort_timer_s)); + CRM_ASSERT(node_pending_timer != NULL); + + node_pending_timer->aborted = FALSE; + node_pending_timer->priority = INFINITY; + node_pending_timer->action = pcmk__graph_restart; + node_pending_timer->text = "Node pending timed out"; + + key = strdup(node->uuid); + CRM_ASSERT(key != NULL); + + g_hash_table_replace(node_pending_timers, key, node_pending_timer); + + node_pending_timer->id = g_timeout_add_seconds(timeout, + node_pending_timer_popped, + key); + CRM_ASSERT(node_pending_timer->id != 0); +} + +static void +remove_node_pending_timer(const char *node_uuid) +{ + if (node_pending_timers == NULL) { + return; + } + + g_hash_table_remove(node_pending_timers, node_uuid); +} + +void +controld_node_pending_timer(const crm_node_t *node) +{ + long long remaining_timeout = 0; + + /* If the node is not an active cluster node, is leaving the cluster, or is + * already part of CPG, or node-pending-timeout is disabled, free any + * node pending timer for it. + */ + if (pcmk_is_set(node->flags, crm_remote_node) + || (node->when_member <= 1) || (node->when_online > 0) + || (controld_globals.node_pending_timeout == 0)) { + remove_node_pending_timer(node->uuid); + return; + } + + // Node is a cluster member but offline in CPG + + remaining_timeout = node->when_member - time(NULL) + + controld_globals.node_pending_timeout; + + /* It already passed node pending timeout somehow. + * Free any node pending timer of it. + */ + if (remaining_timeout <= 0) { + remove_node_pending_timer(node->uuid); + return; + } + + init_node_pending_timer(node, remaining_timeout); +} + +void +controld_free_node_pending_timers(void) +{ + if (node_pending_timers == NULL) { + return; + } + + g_hash_table_destroy(node_pending_timers); + node_pending_timers = NULL; } static const char * @@ -246,7 +386,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, const xmlNode *search = NULL; for(search = reason; search; search = search->parent) { - if (pcmk__str_eq(XML_TAG_DIFF, TYPE(search), pcmk__str_casei)) { + if (pcmk__xe_is(search, XML_TAG_DIFF)) { diff = search; break; } @@ -255,7 +395,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, if(diff) { xml_patch_versions(diff, add, del); for(search = reason; search; search = search->parent) { - if (pcmk__str_eq(XML_DIFF_CHANGE, TYPE(search), pcmk__str_casei)) { + if (pcmk__xe_is(search, XML_DIFF_CHANGE)) { change = search; break; } @@ -276,14 +416,13 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, do_crm_log(level, "Transition %d aborted by %s.%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", - controld_globals.transition_graph->id, TYPE(reason), + controld_globals.transition_graph->id, reason->name, ID(reason), abort_text, add[0], add[1], add[2], fn, line, (const char *) local_path->str, pcmk__btoa(controld_globals.transition_graph->complete)); g_string_free(local_path, TRUE); } else { - const char *kind = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *path = crm_element_value(change, XML_DIFF_PATH); @@ -297,9 +436,9 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, reason = reason->children; } } + CRM_CHECK(reason != NULL, goto done); } - kind = TYPE(reason); if(strcmp(op, "delete") == 0) { const char *shortpath = strrchr(path, '/'); @@ -310,7 +449,7 @@ 
abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); - } else if (pcmk__str_eq(XML_CIB_TAG_NVPAIR, kind, pcmk__str_none)) { + } else if (pcmk__xe_is(reason, XML_CIB_TAG_NVPAIR)) { do_crm_log(level, "Transition %d aborted by %s doing %s %s=%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, @@ -320,7 +459,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); - } else if (pcmk__str_eq(XML_LRM_TAG_RSC_OP, kind, pcmk__str_none)) { + } else if (pcmk__xe_is(reason, XML_LRM_TAG_RSC_OP)) { const char *magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC); do_crm_log(level, "Transition %d aborted by operation %s '%s' on %s: %s " @@ -331,14 +470,15 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, magic, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); - } else if (pcmk__str_any_of(kind, XML_CIB_TAG_STATE, XML_CIB_TAG_NODE, NULL)) { + } else if (pcmk__str_any_of((const char *) reason->name, + XML_CIB_TAG_STATE, XML_CIB_TAG_NODE, NULL)) { const char *uname = crm_peer_uname(ID(reason)); do_crm_log(level, "Transition %d aborted by %s '%s' on %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s", controld_globals.transition_graph->id, - kind, op, (uname? uname : ID(reason)), abort_text, - add[0], add[1], add[2], fn, line, + reason->name, op, pcmk__s(uname, ID(reason)), + abort_text, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else { @@ -347,12 +487,13 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, do_crm_log(level, "Transition %d aborted by %s.%s '%s': %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, - TYPE(reason), (id? id : ""), (op? 
op : "change"), + reason->name, pcmk__s(id, ""), pcmk__s(op, "change"), abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); } } +done: if (controld_globals.transition_graph->complete) { if (controld_get_period_transition_timer() > 0) { controld_stop_transition_timer(); diff --git a/daemons/controld/controld_throttle.c b/daemons/controld/controld_throttle.c index 5b7f9c0..a4775e5 100644 --- a/daemons/controld/controld_throttle.c +++ b/daemons/controld/controld_throttle.c @@ -154,7 +154,7 @@ throttle_cib_load(float *load) if(stream == NULL) { int rc = errno; - crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_strerror(rc), rc); + crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_rc_str(rc), rc); free(loadfile); loadfile = NULL; return FALSE; } @@ -220,7 +220,7 @@ throttle_load_avg(float *load) stream = fopen(loadfile, "r"); if(stream == NULL) { int rc = errno; - crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_strerror(rc), rc); + crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_rc_str(rc), rc); return FALSE; } @@ -407,7 +407,7 @@ static void throttle_update_job_max(const char *preference) { long long max = 0LL; - const char *env_limit = getenv("PCMK_node_action_limit"); + const char *env_limit = pcmk__env_option(PCMK__ENV_NODE_ACTION_LIMIT); if (env_limit != NULL) { preference = env_limit; // Per-node override diff --git a/daemons/controld/controld_transition.c b/daemons/controld/controld_transition.c index c8a342c..897c6d3 100644 --- a/daemons/controld/controld_transition.c +++ b/daemons/controld/controld_transition.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -15,11 +15,6 @@ #include -static void -global_cib_callback(const xmlNode * msg, int callid, int rc, xmlNode * output) -{ -} - static pcmk__graph_t * create_blank_graph(void) { @@ -82,12 +77,6 @@ do_te_control(long long action, crm_err("Could not set CIB notification callback"); init_ok = FALSE; } - - if (cib_conn->cmds->set_op_callback(cib_conn, - global_cib_callback) != pcmk_ok) { - crm_err("Could not set CIB global callback"); - init_ok = FALSE; - } } if (init_ok) { diff --git a/daemons/controld/controld_transition.h b/daemons/controld/controld_transition.h index 2da4221..0655bd9 100644 --- a/daemons/controld/controld_transition.h +++ b/daemons/controld/controld_transition.h @@ -48,6 +48,8 @@ void controld_destroy_transition_trigger(void); void controld_trigger_graph_as(const char *fn, int line); void abort_after_delay(int abort_priority, enum pcmk__graph_next abort_action, const char *abort_text, guint delay_ms); +void controld_node_pending_timer(const crm_node_t *node); +void controld_free_node_pending_timers(void); void abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, const char *abort_text, const xmlNode *reason, diff --git a/daemons/controld/controld_utils.c b/daemons/controld/controld_utils.c index 4ce09d9..9b306ee 100644 --- a/daemons/controld/controld_utils.c +++ b/daemons/controld/controld_utils.c @@ -828,7 +828,7 @@ get_node_id(xmlNode *lrm_rsc_op) { xmlNode *node = lrm_rsc_op; - while (node != NULL && !pcmk__str_eq(XML_CIB_TAG_STATE, TYPE(node), pcmk__str_casei)) { + while ((node != NULL) && !pcmk__xe_is(node, XML_CIB_TAG_STATE)) { node = node->parent; } diff --git a/daemons/controld/pacemaker-controld.c b/daemons/controld/pacemaker-controld.c index 5858898..e4a72c2 100644 --- a/daemons/controld/pacemaker-controld.c +++ b/daemons/controld/pacemaker-controld.c @@ -112,7 +112,7 @@ main(int argc, char **argv) goto done; } - if (crm_ipc_connect(old_instance)) { + if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) { /* IPC end-point already up */ crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h index 1484a00..2334cce 100644 --- a/daemons/controld/pacemaker-controld.h +++ b/daemons/controld/pacemaker-controld.h @@ -36,4 +36,7 @@ void controld_remove_voter(const char *uname); void controld_election_fini(void); void controld_stop_current_election_timeout(void); +void set_join_state(const char *start_state, const char *node_name, + const char *node_uuid, bool remote); + #endif diff --git a/daemons/execd/Makefile.am b/daemons/execd/Makefile.am index 466f0df..ab8544f 100644 --- a/daemons/execd/Makefile.am +++ b/daemons/execd/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2012-2021 the Pacemaker project contributors +# Copyright 2012-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -12,18 +12,20 @@ include $(top_srcdir)/mk/man.mk halibdir = $(CRM_DAEMON_DIR) -halib_PROGRAMS = pacemaker-execd cts-exec-helper +halib_PROGRAMS = pacemaker-execd \ + cts-exec-helper EXTRA_DIST = pacemaker-remoted.8.inc pacemaker_execd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_execd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_execd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/fencing/libstonithd.la -pacemaker_execd_SOURCES = pacemaker-execd.c execd_commands.c \ - execd_alerts.c +pacemaker_execd_LDADD = $(top_builddir)/lib/fencing/libstonithd.la +pacemaker_execd_LDADD += $(top_builddir)/lib/services/libcrmservice.la +pacemaker_execd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_execd_SOURCES = pacemaker-execd.c \ + execd_commands.c \ + execd_alerts.c if BUILD_REMOTE sbin_PROGRAMS = pacemaker-remoted @@ -34,22 +36,27 @@ initdir = $(INITDIR) init_SCRIPTS = pacemaker_remote endif -pacemaker_remoted_CPPFLAGS = -DPCMK__COMPILE_REMOTE $(AM_CPPFLAGS) +pacemaker_remoted_CPPFLAGS = -DPCMK__COMPILE_REMOTE \ + $(AM_CPPFLAGS) pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_remoted_LDADD = $(pacemaker_execd_LDADD) \ - $(top_builddir)/lib/lrmd/liblrmd.la -pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \ - remoted_tls.c remoted_pidone.c remoted_proxy.c +pacemaker_remoted_LDADD = $(top_builddir)/lib/fencing/libstonithd.la +pacemaker_remoted_LDADD += $(top_builddir)/lib/services/libcrmservice.la +pacemaker_remoted_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la +pacemaker_remoted_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \ + remoted_tls.c \ + remoted_pidone.c \ + remoted_proxy.c endif -cts_exec_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/lrmd/liblrmd.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/pengine/libpe_status.la +cts_exec_helper_LDADD = $(top_builddir)/lib/pengine/libpe_status.la +cts_exec_helper_LDADD += $(top_builddir)/lib/cib/libcib.la +cts_exec_helper_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la +cts_exec_helper_LDADD += $(top_builddir)/lib/services/libcrmservice.la +cts_exec_helper_LDADD += $(top_builddir)/lib/common/libcrmcommon.la cts_exec_helper_SOURCES = cts-exec-helper.c noinst_HEADERS = pacemaker-execd.h @@ -59,6 +66,7 @@ CLEANFILES = $(man8_MANS) # Always create a symlink for the old pacemaker_remoted name, so that bundle # container images using a current Pacemaker will run on cluster nodes running # Pacemaker 1 (>=1.1.17). 
+.PHONY: install-exec-hook install-exec-hook: if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd && $(LN_S) pacemaker-execd lrmd @@ -67,6 +75,7 @@ if BUILD_REMOTE cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted && $(LN_S) pacemaker-remoted pacemaker_remoted endif +.PHONY: uninstall-hook uninstall-hook: if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c index 2af5e16..6ebbedf 100644 --- a/daemons/execd/cts-exec-helper.c +++ b/daemons/execd/cts-exec-helper.c @@ -443,9 +443,9 @@ static int generate_params(void) { int rc = pcmk_rc_ok; - pe_working_set_t *data_set = NULL; + pcmk_scheduler_t *scheduler = NULL; xmlNode *cib_xml_copy = NULL; - pe_resource_t *rsc = NULL; + pcmk_resource_t *rsc = NULL; GHashTable *params = NULL; GHashTable *meta = NULL; GHashTableIter iter; @@ -467,27 +467,29 @@ generate_params(void) } // Calculate cluster status - data_set = pe_new_working_set(); - if (data_set == NULL) { - crm_crit("Could not allocate working set"); + scheduler = pe_new_working_set(); + if (scheduler == NULL) { + crm_crit("Could not allocate scheduler data"); return ENOMEM; } - pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); - data_set->input = cib_xml_copy; - data_set->now = crm_time_new(NULL); - cluster_status(data_set); + pe__set_working_set_flags(scheduler, + pcmk_sched_no_counts|pcmk_sched_no_compat); + scheduler->input = cib_xml_copy; + scheduler->now = crm_time_new(NULL); + cluster_status(scheduler); // Find resource in CIB - rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id, - pe_find_renamed|pe_find_any); + rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id, + pcmk_rsc_match_history + |pcmk_rsc_match_basename); if (rsc == NULL) { crm_err("Resource does not exist in config"); - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return EINVAL; } // Add resource instance parameters to options.params - params = pe_rsc_params(rsc, NULL, data_set); + params = pe_rsc_params(rsc, NULL, scheduler); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) &key, @@ -498,7 +500,7 @@ generate_params(void) // Add resource meta-attributes to options.params meta = pcmk__strkey_table(free, free); - get_meta_attributes(meta, rsc, NULL, data_set); + get_meta_attributes(meta, rsc, NULL, scheduler); g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { @@ -509,7 +511,7 @@ generate_params(void) } g_hash_table_destroy(meta); - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return rc; } @@ -587,7 +589,7 @@ main(int argc, char **argv) goto done; } options.api_call = "exec"; - options.action = "monitor"; + options.action = PCMK_ACTION_MONITOR; options.exec_call_opts = lrmd_opt_notify_orig_only; } diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c index fa2761e..cf4503a 100644 --- a/daemons/execd/execd_commands.c +++ b/daemons/execd/execd_commands.c @@ -213,7 +213,7 @@ log_finished(const lrmd_cmd_t *cmd, int exec_time_ms, int queue_time_ms) int log_level = LOG_INFO; GString *str = g_string_sized_new(100); // reasonable starting size - if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { log_level = LOG_DEBUG; } @@ -253,7 +253,7 @@ log_execute(lrmd_cmd_t * cmd) { int log_level = LOG_INFO; 
- if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { log_level = LOG_DEBUG; } @@ -264,9 +264,9 @@ log_execute(lrmd_cmd_t * cmd) static const char * normalize_action_name(lrmd_rsc_t * rsc, const char *action) { - if (pcmk__str_eq(action, "monitor", pcmk__str_casei) && + if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei) && pcmk_is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) { - return "status"; + return PCMK_ACTION_STATUS; } return action; } @@ -517,7 +517,7 @@ schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) /* The controller expects the executor to automatically cancel * recurring operations before a resource stops. */ - if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { cancel_all_recurring(rsc, NULL); } @@ -844,7 +844,8 @@ action_complete(svc_action_t * action) if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { if (pcmk__result_ok(&(cmd->result)) - && pcmk__strcase_any_of(cmd->action, "start", "stop", NULL)) { + && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, + PCMK_ACTION_STOP, NULL)) { /* systemd returns from start and stop actions after the action * begins, not after it completes. We have to jump through a few * hoops so that we don't report 'complete' to the rest of pacemaker @@ -852,7 +853,7 @@ action_complete(svc_action_t * action) */ goagain = true; cmd->real_action = cmd->action; - cmd->action = strdup("monitor"); + cmd->action = strdup(PCMK_ACTION_MONITOR); } else if (cmd->real_action != NULL) { // This is follow-up monitor to check whether start/stop completed @@ -860,7 +861,8 @@ action_complete(svc_action_t * action) goagain = true; } else if (pcmk__result_ok(&(cmd->result)) - && pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) { + && pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP, + pcmk__str_casei)) { goagain = true; } else { @@ -878,9 +880,11 @@ action_complete(svc_action_t * action) if ((cmd->result.execution_status == PCMK_EXEC_DONE) && (cmd->result.exit_status == PCMK_OCF_NOT_RUNNING)) { - if (pcmk__str_eq(cmd->real_action, "start", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_START, + pcmk__str_casei)) { cmd->result.exit_status = PCMK_OCF_UNKNOWN_ERROR; - } else if (pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP, + pcmk__str_casei)) { cmd->result.exit_status = PCMK_OCF_OK; } } @@ -891,12 +895,12 @@ action_complete(svc_action_t * action) #if SUPPORT_NAGIOS if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { - if (action_matches(cmd, "monitor", 0) + if (action_matches(cmd, PCMK_ACTION_MONITOR, 0) && pcmk__result_ok(&(cmd->result))) { /* Successfully executed --version for the nagios plugin */ cmd->result.exit_status = PCMK_OCF_NOT_RUNNING; - } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei) + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei) && !pcmk__result_ok(&(cmd->result))) { #ifdef PCMK__TIME_USE_CGT goagain = true; @@ -1007,11 +1011,11 @@ stonith_action_complete(lrmd_cmd_t *cmd, int exit_status, /* This should be possible only for probes in practice, but * interpret for all actions to be safe. 
*/ - if (pcmk__str_eq(cmd->action, CRMD_ACTION_STATUS, + if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_none)) { exit_status = PCMK_OCF_NOT_RUNNING; - } else if (pcmk__str_eq(cmd->action, CRMD_ACTION_STOP, + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { exit_status = PCMK_OCF_OK; @@ -1035,11 +1039,12 @@ stonith_action_complete(lrmd_cmd_t *cmd, int exit_status, // Certain successful actions change the known state of the resource if ((rsc != NULL) && pcmk__result_ok(&(cmd->result))) { - if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { pcmk__set_result(&rsc->fence_probe_result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); // "running" - } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, + pcmk__str_casei)) { pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); // "not running" } @@ -1235,7 +1240,7 @@ execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) stonith_t *stonith_api = get_stonith_connection(); - if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei) + if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei) && (cmd->interval_ms == 0)) { // Probes don't require a fencer connection stonith_action_complete(cmd, rsc->fence_probe_result.exit_status, @@ -1249,16 +1254,17 @@ execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) "No connection to fencer"); return; - } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { rc = execd_stonith_start(stonith_api, rsc, cmd); if (rc == pcmk_ok) { do_monitor = TRUE; } - } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { rc = execd_stonith_stop(stonith_api, rsc); - } else if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { + } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, + pcmk__str_casei)) { do_monitor = TRUE; } else { @@ -1297,7 +1303,7 @@ execute_nonstonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) #if SUPPORT_NAGIOS /* Recurring operations are cancelled anyway for a stop operation */ if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei) - && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { + && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { cmd->result.exit_status = PCMK_OCF_OK; cmd_finalize(cmd, rsc); @@ -1474,6 +1480,7 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, int rc = pcmk_ok; time_t now = time(NULL); const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); + const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE); if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", @@ -1503,6 +1510,10 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, crm_xml_add(*reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time); + if (start_state) { + crm_xml_add(*reply, PCMK__XA_NODE_START_STATE, start_state); + } + return rc; } diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c index 83a8cd7..e7e30eb 100644 --- a/daemons/execd/pacemaker-execd.c +++ b/daemons/execd/pacemaker-execd.c @@ -493,26 +493,28 @@ main(int 
argc, char **argv, char **envp) pcmk__cli_init_logging(EXECD_NAME, args->verbosity); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); + // ocf_log() (in resource-agents) uses the capitalized env options below option = pcmk__env_option(PCMK__ENV_LOGFACILITY); if (!pcmk__str_eq(option, PCMK__VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches) && !pcmk__str_eq(option, "/dev/null", pcmk__str_none)) { - setenv("HA_LOGFACILITY", option, 1); /* Used by the ocf_log/ha_log OCF macro */ + + pcmk__set_env_option("LOGFACILITY", option, true); } option = pcmk__env_option(PCMK__ENV_LOGFILE); if (!pcmk__str_eq(option, PCMK__VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches)) { - setenv("HA_LOGFILE", option, 1); /* Used by the ocf_log/ha_log OCF macro */ + pcmk__set_env_option("LOGFILE", option, true); if (pcmk__env_option_enabled(crm_system_name, PCMK__ENV_DEBUG)) { - setenv("HA_DEBUGLOG", option, 1); /* Used by the ocf_log/ha_debug OCF macro */ + pcmk__set_env_option("DEBUGLOG", option, true); } } #ifdef PCMK__COMPILE_REMOTE if (options.port != NULL) { - setenv("PCMK_remote_port", options.port, 1); + pcmk__set_env_option(PCMK__ENV_REMOTE_PORT, options.port, false); } #endif // PCMK__COMPILE_REMOTE diff --git a/daemons/execd/remoted_pidone.c b/daemons/execd/remoted_pidone.c index 4f914eb..08271bf 100644 --- a/daemons/execd/remoted_pidone.c +++ b/daemons/execd/remoted_pidone.c @@ -1,5 +1,5 @@ /* - * Copyright 2017-2020 the Pacemaker project contributors + * Copyright 2017-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -203,10 +203,14 @@ remoted_spawn_pidone(int argc, char **argv, char **envp) * from /etc/pacemaker/pcmk-init.env, which could be useful for testing or * containers with a custom PID 1 script that launches pacemaker-remoted. */ - const char *pid1 = (getpid() == 1)? "full" : getenv("PCMK_remote_pid1"); + const char *pid1 = "default"; - if (pid1 == NULL) { - return; + if (getpid() != 1) { + pid1 = pcmk__env_option(PCMK__ENV_REMOTE_PID1); + if (!pcmk__str_any_of(pid1, "full", "vars", NULL)) { + // Default, unset, or invalid + return; + } } /* When a container is launched, it may be given specific environment @@ -217,7 +221,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp) */ load_env_vars("/etc/pacemaker/pcmk-init.env"); - if (strcmp(pid1, "full")) { + if (strcmp(pid1, "vars") == 0) { return; } @@ -226,7 +230,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp) * explicitly configured in the container's environment. 
*/ if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) { - pcmk__set_env_option(PCMK__ENV_LOGFILE, "/var/log/pcmk-init.log"); + pcmk__set_env_option(PCMK__ENV_LOGFILE, "/var/log/pcmk-init.log", true); } sigfillset(&set); @@ -242,7 +246,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp) // Child remains as pacemaker-remoted return; case -1: - perror("fork"); + crm_err("fork failed: %s", pcmk_rc_str(errno)); } /* Parent becomes the reaper of zombie processes */ diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c index c65e3f3..23a2dcf 100644 --- a/daemons/execd/remoted_tls.c +++ b/daemons/execd/remoted_tls.c @@ -273,39 +273,44 @@ bind_and_listen(struct addrinfo *addr) fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (fd < 0) { - crm_perror(LOG_ERR, "Listener socket creation failed"); - return -1; + rc = errno; + crm_err("Listener socket creation failed: %s", pcmk_rc_str(rc)); + return -rc; } /* reuse address */ optval = 1; rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { - crm_perror(LOG_ERR, "Local address reuse not allowed on %s", buffer); + rc = errno; + crm_err("Local address reuse not allowed on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); - return -1; + return -rc; } if (addr->ai_family == AF_INET6) { optval = 0; rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval)); if (rc < 0) { - crm_perror(LOG_INFO, "Couldn't disable IPV6-only on %s", buffer); + rc = errno; + crm_err("Couldn't disable IPV6-only on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); - return -1; + return -rc; } } if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) { - crm_perror(LOG_ERR, "Cannot bind to %s", buffer); + rc = errno; + crm_err("Cannot bind to %s: %s", buffer, pcmk_rc_str(rc)); close(fd); - return -1; + return -rc; } if (listen(fd, 10) == -1) { - crm_perror(LOG_ERR, "Cannot listen on %s", buffer); + rc = errno; + crm_err("Cannot listen on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); - return -1; + return -rc; } return fd; } @@ -325,12 +330,15 @@ get_address_info(const char *bind_name, int port, struct addrinfo **res) snprintf(port_str, sizeof(port_str), "%d", port); rc = getaddrinfo(bind_name, port_str, &hints, res); - if (rc) { + rc = pcmk__gaierror2rc(rc); + + if (rc != pcmk_rc_ok) { crm_err("Unable to get IP address(es) for %s: %s", - (bind_name? 
bind_name : "local node"), pcmk_rc_str(rc)); + return rc; } - return pcmk_ok; + + return pcmk_rc_ok; } int @@ -340,7 +348,7 @@ lrmd_init_remote_tls_server(void) int port = crm_default_remote_port(); struct addrinfo *res = NULL, *iter; gnutls_datum_t psk_key = { NULL, 0 }; - const char *bind_name = getenv("PCMK_remote_address"); + const char *bind_name = pcmk__env_option(PCMK__ENV_REMOTE_ADDRESS); static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = lrmd_remote_listen, @@ -371,7 +379,7 @@ lrmd_init_remote_tls_server(void) } gnutls_free(psk_key.data); - if (get_address_info(bind_name, port, &res) != pcmk_ok) { + if (get_address_info(bind_name, port, &res) != pcmk_rc_ok) { return -1; } @@ -391,7 +399,7 @@ lrmd_init_remote_tls_server(void) if (iter->ai_family == filter) { ssock = bind_and_listen(iter); } - if (ssock != -1) { + if (ssock >= 0) { break; } diff --git a/daemons/fenced/Makefile.am b/daemons/fenced/Makefile.am index 2ca0088..62aa864 100644 --- a/daemons/fenced/Makefile.am +++ b/daemons/fenced/Makefile.am @@ -14,7 +14,8 @@ include $(top_srcdir)/mk/man.mk halibdir = $(CRM_DAEMON_DIR) -halib_PROGRAMS = pacemaker-fenced cts-fence-helper +halib_PROGRAMS = pacemaker-fenced \ + cts-fence-helper noinst_HEADERS = pacemaker-fenced.h @@ -23,30 +24,36 @@ man7_MANS = pacemaker-fenced.7 endif cts_fence_helper_SOURCES = cts-fence-helper.c -cts_fence_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/fencing/libstonithd.la +cts_fence_helper_LDADD = $(top_builddir)/lib/fencing/libstonithd.la +cts_fence_helper_LDADD += $(top_builddir)/lib/common/libcrmcommon.la pacemaker_fenced_YFLAGS = -d pacemaker_fenced_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_fenced_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_fenced_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/cluster/libcrmcluster.la \ - $(top_builddir)/lib/fencing/libstonithd.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(CLUSTERLIBS) -pacemaker_fenced_SOURCES = pacemaker-fenced.c \ - fenced_commands.c \ - fenced_remote.c \ + +pacemaker_fenced_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +pacemaker_fenced_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +pacemaker_fenced_LDADD += $(top_builddir)/lib/cib/libcib.la +pacemaker_fenced_LDADD += $(top_builddir)/lib/cluster/libcrmcluster.la +pacemaker_fenced_LDADD += $(top_builddir)/lib/fencing/libstonithd.la +pacemaker_fenced_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemaker_fenced_LDADD += $(CLUSTERLIBS) + +pacemaker_fenced_SOURCES = pacemaker-fenced.c \ + fenced_cib.c \ + fenced_commands.c \ + fenced_remote.c \ + fenced_scheduler.c \ fenced_history.c CLEANFILES = $(man7_MANS) $(man8_MANS) if BUILD_LEGACY_LINKS +.PHONY: install-exec-hook install-exec-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd && $(LN_S) pacemaker-fenced stonithd +.PHONY: uninstall-hook uninstall-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd endif diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c index e18a1f4..07bd500 100644 --- a/daemons/fenced/cts-fence-helper.c +++ b/daemons/fenced/cts-fence-helper.c @@ -212,10 +212,12 @@ run_fence_failure_test(void) cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params), "Register device1 for failure test", 1, 0); - single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0), + single_test(st->cmds->fence(st, 
st_opts, "false_1_node2", PCMK_ACTION_OFF, + 3, 0), "Fence failure results off", 1, -ENODATA); - single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0), + single_test(st->cmds->fence(st, st_opts, "false_1_node2", + PCMK_ACTION_REBOOT, 3, 0), "Fence failure results reboot", 1, -ENODATA); single_test(st->cmds->remove_device(st, st_opts, "test-id1"), @@ -246,11 +248,13 @@ run_fence_failure_rollover_test(void) cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_dummy", params), "Register device2 for rollover test", 1, 0); - single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0), + single_test(st->cmds->fence(st, st_opts, "false_1_node2", PCMK_ACTION_OFF, + 3, 0), "Fence rollover results off", 1, 0); /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */ - single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0), + single_test(st->cmds->fence(st, st_opts, "false_1_node2", PCMK_ACTION_ON, 3, + 0), "Fence rollover results on", 1, -ENODEV); single_test(st->cmds->remove_device(st, st_opts, "test-id1"), @@ -278,7 +282,8 @@ run_standard_test(void) stonith_key_value_freeall(params, 1, 1); params = NULL; - single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0); + single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), + PCMK_ACTION_LIST, 1, 0); single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0); @@ -288,14 +293,17 @@ run_standard_test(void) single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1), "Status false_1_node1", 1, 0); - single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0), + single_test(st->cmds->fence(st, st_opts, "unknown-host", PCMK_ACTION_OFF, + 1, 0), "Fence unknown-host (expected failure)", 0, -ENODEV); - single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0), + single_test(st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_OFF, + 1, 0), "Fence false_1_node1", 1, 0); /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */ - single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0), + single_test(st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 1, + 0), "Unfence false_1_node1", 1, -ENODEV); /* Confirm that an invalid level index is rejected */ @@ -362,31 +370,31 @@ standard_dev_test(void) rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); - rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0); + rc = st->cmds->fence(st, st_opts, "unknown-host", PCMK_ACTION_OFF, 60, 0); crm_debug("Fence unknown-host: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); - rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0); + rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_OFF, 60, 0); crm_debug("Fence false_1_node1: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); - rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0); + rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 10, 0); crm_debug("Unfence false_1_node1: %d", rc); rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10); crm_debug("Status false_1_node1: %d", rc); - rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0); + rc = st->cmds->fence(st, st_opts, "some-host", PCMK_ACTION_OFF, 10, 0); crm_debug("Fence alias: %d", rc); rc = 
st->cmds->status(st, st_opts, "test-id", "some-host", 10); crm_debug("Status alias: %d", rc); - rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0); + rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 10, 0); crm_debug("Unfence false_1_node1: %d", rc); rc = st->cmds->remove_device(st, st_opts, "test-id"); @@ -426,7 +434,8 @@ test_async_fence_pass(int check_event) return; } - rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0); + rc = st->cmds->fence(st, 0, "true_1_node1", PCMK_ACTION_OFF, + MAINLOOP_DEFAULT_TIMEOUT, 0); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(__func__, false); @@ -459,7 +468,8 @@ test_async_fence_custom_timeout(int check_event) } begin = time(NULL); - rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0); + rc = st->cmds->fence(st, 0, "custom_timeout_node1", PCMK_ACTION_OFF, + MAINLOOP_DEFAULT_TIMEOUT, 0); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(__func__, false); @@ -479,7 +489,8 @@ test_async_fence_timeout(int check_event) return; } - rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0); + rc = st->cmds->fence(st, 0, "false_1_node2", PCMK_ACTION_OFF, + MAINLOOP_DEFAULT_TIMEOUT, 0); if (rc < 0) { crm_err("fence failed with rc %d", rc); mainloop_test_done(__func__, false); diff --git a/daemons/fenced/fenced_cib.c b/daemons/fenced/fenced_cib.c new file mode 100644 index 0000000..e11bf68 --- /dev/null +++ b/daemons/fenced/fenced_cib.c @@ -0,0 +1,734 @@ +/* + * Copyright 2009-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. +*/ + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include + +static xmlNode *local_cib = NULL; +static cib_t *cib_api = NULL; +static bool have_cib_devices = FALSE; + +/*! + * \internal + * \brief Check whether a node has a specific attribute name/value + * + * \param[in] node Name of node to check + * \param[in] name Name of an attribute to look for + * \param[in] value The value the named attribute needs to be set to in order to be considered a match + * + * \return TRUE if the locally cached CIB has the specified node attribute + */ +gboolean +node_has_attr(const char *node, const char *name, const char *value) +{ + GString *xpath = NULL; + xmlNode *match; + + CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL) + && (value != NULL), return FALSE); + + /* Search for the node's attributes in the CIB. While the schema allows + * multiple sets of instance attributes, and allows instance attributes to + * use id-ref to reference values elsewhere, that is intended for resources, + * so we ignore that here. 
+ */ + xpath = g_string_sized_new(256); + pcmk__g_strcat(xpath, + "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE + "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS + "/" XML_CIB_TAG_NVPAIR + "[@" XML_NVPAIR_ATTR_NAME "='", name, "' " + "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL); + + match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER); + + g_string_free(xpath, TRUE); + return (match != NULL); +} + +static void +add_topology_level(xmlNode *match) +{ + char *desc = NULL; + pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; + + CRM_CHECK(match != NULL, return); + + fenced_register_level(match, &desc, &result); + fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc); + pcmk__reset_result(&result); + free(desc); +} + +static void +topology_remove_helper(const char *node, int level) +{ + char *desc = NULL; + pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; + xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); + + crm_xml_add(data, F_STONITH_ORIGIN, __func__); + crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); + crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); + + fenced_unregister_level(data, &desc, &result); + fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc); + pcmk__reset_result(&result); + free_xml(data); + free(desc); +} + +static void +remove_topology_level(xmlNode *match) +{ + int index = 0; + char *key = NULL; + + CRM_CHECK(match != NULL, return); + + key = stonith_level_key(match, fenced_target_by_unknown); + crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); + topology_remove_helper(key, index); + free(key); +} + +static void +register_fencing_topology(xmlXPathObjectPtr xpathObj) +{ + int max = numXpathResults(xpathObj), lpc = 0; + + for (lpc = 0; lpc < max; lpc++) { + xmlNode *match = getXpathResult(xpathObj, lpc); + + remove_topology_level(match); + add_topology_level(match); + } +} + +/* Fencing + + + + + + + + + + + + + + + + +*/ + +void +fencing_topology_init(void) +{ + xmlXPathObjectPtr xpathObj = NULL; + const char *xpath = "//" XML_TAG_FENCING_LEVEL; + + crm_trace("Full topology refresh"); + free_topology_list(); + init_topology_list(); + + /* Grab everything */ + xpathObj = xpath_search(local_cib, xpath); + register_fencing_topology(xpathObj); + + freeXpathObject(xpathObj); +} + +static void +remove_cib_device(xmlXPathObjectPtr xpathObj) +{ + int max = numXpathResults(xpathObj), lpc = 0; + + for (lpc = 0; lpc < max; lpc++) { + const char *rsc_id = NULL; + const char *standard = NULL; + xmlNode *match = getXpathResult(xpathObj, lpc); + + CRM_LOG_ASSERT(match != NULL); + if(match != NULL) { + standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); + } + + if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { + continue; + } + + rsc_id = crm_element_value(match, XML_ATTR_ID); + + stonith_device_remove(rsc_id, true); + } +} + +static void +update_stonith_watchdog_timeout_ms(xmlNode *cib) +{ + long timeout_ms = 0; + xmlNode *stonith_watchdog_xml = NULL; + const char *value = NULL; + + stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", + cib, LOG_NEVER); + if (stonith_watchdog_xml) { + value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); + } + if (value) { + timeout_ms = crm_get_msec(value); + } + + if (timeout_ms < 0) { + timeout_ms = pcmk__auto_watchdog_timeout(); + } + + stonith_watchdog_timeout_ms = timeout_ms; +} + +/*! 
+ * \internal + * \brief Update all STONITH device definitions based on current CIB + */ +static void +cib_devices_update(void) +{ + GHashTableIter iter; + stonith_device_t *device = NULL; + + crm_info("Updating devices to version %s.%s.%s", + crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), + crm_element_value(local_cib, XML_ATTR_GENERATION), + crm_element_value(local_cib, XML_ATTR_NUMUPDATES)); + + g_hash_table_iter_init(&iter, device_list); + while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { + if (device->cib_registered) { + device->dirty = TRUE; + } + } + + /* have list repopulated if cib has a watchdog-fencing-resource + TODO: keep a cached list for queries happening while we are refreshing + */ + g_list_free_full(stonith_watchdog_targets, free); + stonith_watchdog_targets = NULL; + + fenced_scheduler_run(local_cib); + + g_hash_table_iter_init(&iter, device_list); + while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { + if (device->dirty) { + g_hash_table_iter_remove(&iter); + } + } +} + +static void +update_cib_stonith_devices_v1(const char *event, xmlNode * msg) +{ + const char *reason = "none"; + gboolean needs_update = FALSE; + xmlXPathObjectPtr xpath_obj = NULL; + + /* process new constraints */ + xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION); + if (numXpathResults(xpath_obj) > 0) { + int max = numXpathResults(xpath_obj), lpc = 0; + + /* Safest and simplest to always recompute */ + needs_update = TRUE; + reason = "new location constraint"; + + for (lpc = 0; lpc < max; lpc++) { + xmlNode *match = getXpathResult(xpath_obj, lpc); + + crm_log_xml_trace(match, "new constraint"); + } + } + freeXpathObject(xpath_obj); + + /* process deletions */ + xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE); + if (numXpathResults(xpath_obj) > 0) { + remove_cib_device(xpath_obj); + } + freeXpathObject(xpath_obj); + + /* process additions */ + xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE); + if (numXpathResults(xpath_obj) > 0) { + int max = numXpathResults(xpath_obj), lpc = 0; + + for (lpc = 0; lpc < max; lpc++) { + const char *rsc_id = NULL; + const char *standard = NULL; + xmlNode *match = getXpathResult(xpath_obj, lpc); + + rsc_id = crm_element_value(match, XML_ATTR_ID); + standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); + + if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { + continue; + } + + crm_trace("Fencing resource %s was added or modified", rsc_id); + reason = "new resource"; + needs_update = TRUE; + } + } + freeXpathObject(xpath_obj); + + if(needs_update) { + crm_info("Updating device list from CIB: %s", reason); + cib_devices_update(); + } +} + +static void +update_cib_stonith_devices_v2(const char *event, xmlNode * msg) +{ + xmlNode *change = NULL; + char *reason = NULL; + bool needs_update = FALSE; + xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + + for (change = pcmk__xml_first_child(patchset); change != NULL; + change = pcmk__xml_next(change)) { + const char *op = crm_element_value(change, XML_DIFF_OP); + const char *xpath = crm_element_value(change, XML_DIFF_PATH); + const char *shortpath = NULL; + + if ((op == NULL) || + (strcmp(op, "move") == 0) || + strstr(xpath, "/"XML_CIB_TAG_STATUS)) { + continue; + } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) { + const char *rsc_id = NULL; + char 
*search = NULL; + char *mutable = NULL; + + if (strstr(xpath, XML_TAG_ATTR_SETS) || + strstr(xpath, XML_TAG_META_SETS)) { + needs_update = TRUE; + pcmk__str_update(&reason, + "(meta) attribute deleted from resource"); + break; + } + pcmk__str_update(&mutable, xpath); + rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'"); + if (rsc_id != NULL) { + rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'"); + search = strchr(rsc_id, '\''); + } + if (search != NULL) { + *search = 0; + stonith_device_remove(rsc_id, true); + /* watchdog_device_update called afterwards + to fall back to implicit definition if needed */ + } else { + crm_warn("Ignoring malformed CIB update (resource deletion)"); + } + free(mutable); + + } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) || + strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) || + strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) { + shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath); + reason = crm_strdup_printf("%s %s", op, shortpath+1); + needs_update = TRUE; + break; + } + } + + if(needs_update) { + crm_info("Updating device list from CIB: %s", reason); + cib_devices_update(); + } else { + crm_trace("No updates for device list found in CIB"); + } + free(reason); +} + +static void +update_cib_stonith_devices(const char *event, xmlNode * msg) +{ + int format = 1; + xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + + CRM_ASSERT(patchset); + crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); + switch(format) { + case 1: + update_cib_stonith_devices_v1(event, msg); + break; + case 2: + update_cib_stonith_devices_v2(event, msg); + break; + default: + crm_warn("Unknown patch format: %d", format); + } +} + +static void +watchdog_device_update(void) +{ + if (stonith_watchdog_timeout_ms > 0) { + if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && + !stonith_watchdog_targets) { + /* getting here watchdog-fencing enabled, no device there yet + and reason isn't stonith_watchdog_targets preventing that + */ + int rc; + xmlNode *xml; + + xml = create_device_registration_xml( + STONITH_WATCHDOG_ID, + st_namespace_internal, + STONITH_WATCHDOG_AGENT, + NULL, /* stonith_device_register will add our + own name as PCMK_STONITH_HOST_LIST param + so we can skip that here + */ + NULL); + rc = stonith_device_register(xml, TRUE); + free_xml(xml); + if (rc != pcmk_ok) { + rc = pcmk_legacy2rc(rc); + exit_code = CRM_EX_FATAL; + crm_crit("Cannot register watchdog pseudo fence agent: %s", + pcmk_rc_str(rc)); + stonith_shutdown(0); + } + } + + } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) { + /* be silent if no device - todo parameter to stonith_device_remove */ + stonith_device_remove(STONITH_WATCHDOG_ID, true); + } +} + +/*! 
+ * \internal + * \brief Query the full CIB + * + * \return Standard Pacemaker return code + */ +static int +fenced_query_cib(void) +{ + int rc = pcmk_ok; + + crm_trace("Re-requesting full CIB"); + rc = cib_api->cmds->query(cib_api, NULL, &local_cib, + cib_scope_local|cib_sync_call); + rc = pcmk_legacy2rc(rc); + if (rc == pcmk_rc_ok) { + CRM_ASSERT(local_cib != NULL); + } else { + crm_err("Couldn't retrieve the CIB: %s " CRM_XS " rc=%d", + pcmk_rc_str(rc), rc); + } + return rc; +} + +static void +remove_fencing_topology(xmlXPathObjectPtr xpathObj) +{ + int max = numXpathResults(xpathObj), lpc = 0; + + for (lpc = 0; lpc < max; lpc++) { + xmlNode *match = getXpathResult(xpathObj, lpc); + + CRM_LOG_ASSERT(match != NULL); + if (match && crm_element_value(match, XML_DIFF_MARKER)) { + /* Deletion */ + int index = 0; + char *target = stonith_level_key(match, fenced_target_by_unknown); + + crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); + if (target == NULL) { + crm_err("Invalid fencing target in element %s", ID(match)); + + } else if (index <= 0) { + crm_err("Invalid level for %s in element %s", target, ID(match)); + + } else { + topology_remove_helper(target, index); + } + /* } else { Deal with modifications during the 'addition' stage */ + } + } +} + +static void +update_fencing_topology(const char *event, xmlNode * msg) +{ + int format = 1; + const char *xpath; + xmlXPathObjectPtr xpathObj = NULL; + xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + + CRM_ASSERT(patchset); + crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); + + if(format == 1) { + /* Process deletions (only) */ + xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; + xpathObj = xpath_search(msg, xpath); + + remove_fencing_topology(xpathObj); + freeXpathObject(xpathObj); + + /* Process additions and changes */ + xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; + xpathObj = xpath_search(msg, xpath); + + register_fencing_topology(xpathObj); + freeXpathObject(xpathObj); + + } else if(format == 2) { + xmlNode *change = NULL; + int add[] = { 0, 0, 0 }; + int del[] = { 0, 0, 0 }; + + xml_patch_versions(patchset, add, del); + + for (change = pcmk__xml_first_child(patchset); change != NULL; + change = pcmk__xml_next(change)) { + const char *op = crm_element_value(change, XML_DIFF_OP); + const char *xpath = crm_element_value(change, XML_DIFF_PATH); + + if(op == NULL) { + continue; + + } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) { + /* Change to a specific entry */ + + crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); + if(strcmp(op, "move") == 0) { + continue; + + } else if(strcmp(op, "create") == 0) { + add_topology_level(change->children); + + } else if(strcmp(op, "modify") == 0) { + xmlNode *match = first_named_child(change, XML_DIFF_RESULT); + + if(match) { + remove_topology_level(match->children); + add_topology_level(match->children); + } + + } else if(strcmp(op, "delete") == 0) { + /* Nuclear option, all we have is the path and an id... 
not enough to remove a specific entry */ + crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", + op, add[0], add[1], add[2], xpath); + fencing_topology_init(); + return; + } + + } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) { + /* Change to the topology in general */ + crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", + op, add[0], add[1], add[2], xpath); + fencing_topology_init(); + return; + + } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { + /* Changes to the whole config section, possibly including the topology as a whild */ + if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) { + crm_trace("Nothing for us in %s operation %d.%d.%d for %s.", + op, add[0], add[1], add[2], xpath); + + } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { + crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", + op, add[0], add[1], add[2], xpath); + fencing_topology_init(); + return; + } + + } else { + crm_trace("Nothing for us in %s operation %d.%d.%d for %s", + op, add[0], add[1], add[2], xpath); + } + } + + } else { + crm_warn("Unknown patch format: %d", format); + } +} + +static void +update_cib_cache_cb(const char *event, xmlNode * msg) +{ + long timeout_ms_saved = stonith_watchdog_timeout_ms; + bool need_full_refresh = false; + + if(!have_cib_devices) { + crm_trace("Skipping updates until we get a full dump"); + return; + + } else if(msg == NULL) { + crm_trace("Missing %s update", event); + return; + } + + /* Maintain a local copy of the CIB so that we have full access + * to device definitions, location constraints, and node attributes + */ + if (local_cib != NULL) { + int rc = pcmk_ok; + xmlNode *patchset = NULL; + + crm_element_value_int(msg, F_CIB_RC, &rc); + if (rc != pcmk_ok) { + return; + } + + patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + rc = xml_apply_patchset(local_cib, patchset, TRUE); + switch (rc) { + case pcmk_ok: + case -pcmk_err_old_data: + break; + case -pcmk_err_diff_resync: + case -pcmk_err_diff_failed: + crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); + free_xml(local_cib); + local_cib = NULL; + break; + default: + crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); + free_xml(local_cib); + local_cib = NULL; + } + } + + if (local_cib == NULL) { + if (fenced_query_cib() != pcmk_rc_ok) { + return; + } + need_full_refresh = true; + } + + pcmk__refresh_node_caches_from_cib(local_cib); + update_stonith_watchdog_timeout_ms(local_cib); + + if (timeout_ms_saved != stonith_watchdog_timeout_ms) { + need_full_refresh = true; + } + + if (need_full_refresh) { + fencing_topology_init(); + cib_devices_update(); + } else { + // Partial refresh + update_fencing_topology(event, msg); + update_cib_stonith_devices(event, msg); + } + + watchdog_device_update(); +} + +static void +init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) +{ + crm_info("Updating device list from CIB"); + have_cib_devices = TRUE; + local_cib = copy_xml(output); + + pcmk__refresh_node_caches_from_cib(local_cib); + update_stonith_watchdog_timeout_ms(local_cib); + + fencing_topology_init(); + cib_devices_update(); + watchdog_device_update(); +} + +static void +cib_connection_destroy(gpointer user_data) +{ + if (stonith_shutdown_flag) { + crm_info("Connection to the CIB manager closed"); + return; + } else { + crm_crit("Lost connection to the CIB manager, shutting down"); + } + if 
(cib_api) { + cib_api->cmds->signoff(cib_api); + } + stonith_shutdown(0); +} + +/*! + * \internal + * \brief Disconnect from CIB manager + */ +void +fenced_cib_cleanup(void) +{ + if (cib_api != NULL) { + cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, + update_cib_cache_cb); + cib__clean_up_connection(&cib_api); + } + free_xml(local_cib); + local_cib = NULL; +} + +void +setup_cib(void) +{ + int rc, retries = 0; + + cib_api = cib_new(); + if (cib_api == NULL) { + crm_err("No connection to the CIB manager"); + return; + } + + do { + sleep(retries); + rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command); + } while (rc == -ENOTCONN && ++retries < 5); + + if (rc != pcmk_ok) { + crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); + + } else if (pcmk_ok != + cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) { + crm_err("Could not set CIB notification callback"); + + } else { + rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); + cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", + init_cib_cache_cb); + cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); + crm_info("Watching for fencing topology changes"); + } +} diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index ba63cf8..7a62ed6 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -68,8 +68,6 @@ struct device_search_s { static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data); -static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer, - pcmk__client_t *client); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); @@ -124,7 +122,7 @@ static gboolean is_action_required(const char *action, const stonith_device_t *device) { return (device != NULL) && device->automatic_unfencing - && pcmk__str_eq(action, "on", pcmk__str_none); + && pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none); } static int @@ -223,11 +221,11 @@ get_action_timeout(const stonith_device_t *device, const char *action, /* If "reboot" was requested but the device does not support it, * we will remap to "off", so check timeout for "off" instead */ - if (pcmk__str_eq(action, "reboot", pcmk__str_none) + if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_trace("%s doesn't support reboot, using timeout for off instead", device->id); - action = "off"; + action = PCMK_ACTION_OFF; } /* If the device config specified an action-specific timeout, use it */ @@ -277,7 +275,7 @@ fenced_device_reboot_action(const char *device_id) action = g_hash_table_lookup(device->params, "pcmk_reboot_action"); } } - return pcmk__s(action, "reboot"); + return pcmk__s(action, PCMK_ACTION_REBOOT); } /*! 
@@ -554,7 +552,7 @@ stonith_device_execute(stonith_device_t * device) #if SUPPORT_CIBSECRETS exec_rc = pcmk__substitute_secrets(device->id, device->params); if (exec_rc != pcmk_rc_ok) { - if (pcmk__str_eq(cmd->action, "stop", pcmk__str_none)) { + if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { crm_info("Proceeding with stop operation for %s " "despite being unable to load CIB secrets (%s)", device->id, pcmk_rc_str(exec_rc)); @@ -570,14 +568,14 @@ stonith_device_execute(stonith_device_t * device) #endif action_str = cmd->action; - if (pcmk__str_eq(cmd->action, "reboot", pcmk__str_none) + if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_notice("Remapping 'reboot' action%s%s using %s to 'off' " "because agent '%s' does not support reboot", ((cmd->target == NULL)? "" : " targeting "), pcmk__s(cmd->target, ""), device->id, device->agent); - action_str = "off"; + action_str = PCMK_ACTION_OFF; } if (pcmk_is_set(device->flags, st_device_supports_parameter_port)) { @@ -691,7 +689,7 @@ schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) delay_base = delay_max; } if (delay_max > 0) { - // coverity[dont_call] We're not using rand() for security + // coverity[dont_call] It doesn't matter here if rand() is predictable cmd->start_delay += ((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0) + delay_base; @@ -948,16 +946,16 @@ read_action_metadata(stonith_device_t *device) action = crm_element_value(match, "name"); - if (pcmk__str_eq(action, "list", pcmk__str_none)) { + if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_list); - } else if (pcmk__str_eq(action, "status", pcmk__str_none)) { + } else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_status); - } else if (pcmk__str_eq(action, "reboot", pcmk__str_none)) { + } else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_reboot); - } else if (pcmk__str_eq(action, "on", pcmk__str_none)) { + } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { /* "automatic" means the cluster will unfence node when it joins */ /* "required" is a deprecated synonym for "automatic" */ if (pcmk__xe_attr_is_true(match, "automatic") || pcmk__xe_attr_is_true(match, "required")) { @@ -1024,16 +1022,16 @@ xml2device_params(const char *name, const xmlNode *dev) if (*value == '\0') { crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP); - } else if (strcmp(value, "reboot") == 0) { + } else if (strcmp(value, PCMK_ACTION_REBOOT) == 0) { crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)", STONITH_ATTR_ACTION_OP); - } else if (strcmp(value, "off") == 0) { - map_action(params, "reboot", value); + } else if (strcmp(value, PCMK_ACTION_OFF) == 0) { + map_action(params, PCMK_ACTION_REBOOT, value); } else { - map_action(params, "off", value); - map_action(params, "reboot", value); + map_action(params, PCMK_ACTION_OFF, value); + map_action(params, PCMK_ACTION_REBOOT, value); } g_hash_table_remove(params, STONITH_ATTR_ACTION_OP); @@ -1132,7 +1130,7 @@ build_device_from_xml(xmlNode *dev) device->automatic_unfencing = TRUE; } - if (is_action_required("on", device)) { + if (is_action_required(PCMK_ACTION_ON, device)) { crm_info("Fencing device '%s' requires 
unfencing", device->id); } @@ -1672,8 +1670,7 @@ unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, * search by xpath, because it might give multiple hits if the XML is the * CIB. */ - if ((xml != NULL) - && !pcmk__str_eq(TYPE(xml), XML_TAG_FENCING_LEVEL, pcmk__str_none)) { + if ((xml != NULL) && !pcmk__xe_is(xml, XML_TAG_FENCING_LEVEL)) { xml = get_xpath_object("//" XML_TAG_FENCING_LEVEL, xml, LOG_WARNING); } @@ -1972,7 +1969,7 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) "Watchdog fence device not configured"); return; - } else if (pcmk__str_eq(action, "list", pcmk__str_none)) { + } else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_result_output(result, list_to_string(stonith_watchdog_targets, @@ -1980,7 +1977,7 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) NULL); return; - } else if (pcmk__str_eq(action, "monitor", pcmk__str_none)) { + } else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return; } @@ -1994,7 +1991,8 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) "'%s' not found", id); return; - } else if (!device->api_registered && !strcmp(action, "monitor")) { + } else if (!device->api_registered + && (strcmp(action, PCMK_ACTION_MONITOR) == 0)) { // Monitors may run only on "started" (API-registered) devices crm_info("Ignoring API '%s' action request because device %s not active", action, id); @@ -2104,14 +2102,14 @@ localhost_is_eligible_with_remap(const stonith_device_t *device, // Check potential remaps - if (pcmk__str_eq(action, "reboot", pcmk__str_none)) { + if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* "reboot" might get remapped to "off" then "on", so even if reboot is * disallowed, return true if either of those is allowed. We'll report * the disallowed actions with the results. We never allow self-fencing * for remapped "on" actions because the target is off at that point. 
*/ - if (localhost_is_eligible(device, "off", target, allow_self) - || localhost_is_eligible(device, "on", target, FALSE)) { + if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self) + || localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) { return true; } } @@ -2146,7 +2144,7 @@ can_fence_host_with_device(stonith_device_t *dev, /* Answer immediately if the device does not support the action * or the local node is not allowed to perform it */ - if (pcmk__str_eq(action, "on", pcmk__str_none) + if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none) && !pcmk_is_set(dev->flags, st_device_supports_on)) { check_type = "Agent does not support 'on'"; goto search_report_results; @@ -2175,7 +2173,8 @@ can_fence_host_with_device(stonith_device_t *dev, time_t now = time(NULL); if (dev->targets == NULL || dev->targets_age + 60 < now) { - int device_timeout = get_action_timeout(dev, "list", search->per_device_timeout); + int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST, + search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_list_timeout(%ds) parameter of %s is larger than stonith-timeout(%ds), timeout may occur", @@ -2185,7 +2184,7 @@ can_fence_host_with_device(stonith_device_t *dev, crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); - schedule_internal_command(__func__, dev, "list", NULL, + schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ @@ -2207,7 +2206,7 @@ can_fence_host_with_device(stonith_device_t *dev, crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); - schedule_internal_command(__func__, dev, "status", target, + schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; @@ -2384,6 +2383,30 @@ add_action_reply(xmlNode *xml, const char *action, add_disallowed(child, action, device, target, allow_suicide); } +/*! + * \internal + * \brief Send a reply to a CPG peer or IPC client + * + * \param[in] reply XML reply to send + * \param[in] call_options Send synchronously if st_opt_sync_call is set + * \param[in] remote_peer If not NULL, name of peer node to send CPG reply + * \param[in,out] client If not NULL, client to send IPC reply + */ +static void +stonith_send_reply(const xmlNode *reply, int call_options, + const char *remote_peer, pcmk__client_t *client) +{ + CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)), + return); + + if (remote_peer == NULL) { + do_local_reply(reply, client, call_options); + } else { + send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, + reply, FALSE); + } +} + static void stonith_query_capable_device_cb(GList * devices, void *user_data) { @@ -2429,15 +2452,16 @@ stonith_query_capable_device_cb(GList * devices, void *user_data) * capable device that doesn't support "reboot", remap to "off" instead. 
*/ if (!pcmk_is_set(device->flags, st_device_supports_reboot) - && pcmk__str_eq(query->action, "reboot", pcmk__str_none)) { + && pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, + pcmk__str_none)) { crm_trace("%s doesn't support reboot, using values for off instead", device->id); - action = "off"; + action = PCMK_ACTION_OFF; } /* Add action-specific values if available */ add_action_specific_attributes(dev, action, device, query->target); - if (pcmk__str_eq(query->action, "reboot", pcmk__str_none)) { + if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A "reboot" *might* get remapped to "off" then "on", so after * sending the "reboot"-specific values in the main element, we add * sub-elements for "off" and "on" values. @@ -2451,9 +2475,9 @@ stonith_query_capable_device_cb(GList * devices, void *user_data) */ add_disallowed(dev, action, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_suicide)); - add_action_reply(dev, "off", device, query->target, + add_action_reply(dev, PCMK_ACTION_OFF, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_suicide)); - add_action_reply(dev, "on", device, query->target, FALSE); + add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE); } /* A query without a target wants device parameters */ @@ -2765,8 +2789,10 @@ st_child_done(int pid, const pcmk__action_result_t *result, void *user_data) /* The device is ready to do something else now */ if (device) { - if (!device->verified && pcmk__result_ok(result) && - (pcmk__strcase_any_of(cmd->action, "list", "monitor", "status", NULL))) { + if (!device->verified && pcmk__result_ok(result) + && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST, + PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, + NULL)) { device->verified = TRUE; } @@ -3052,30 +3078,6 @@ check_alternate_host(const char *target) return NULL; } -/*! - * \internal - * \brief Send a reply to a CPG peer or IPC client - * - * \param[in] reply XML reply to send - * \param[in] call_options Send synchronously if st_opt_sync_call is set - * \param[in] remote_peer If not NULL, name of peer node to send CPG reply - * \param[in,out] client If not NULL, client to send IPC reply - */ -static void -stonith_send_reply(xmlNode *reply, int call_options, const char *remote_peer, - pcmk__client_t *client) -{ - CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)), - return); - - if (remote_peer == NULL) { - do_local_reply(reply, client, call_options); - } else { - send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, - reply, FALSE); - } -} - static void remove_relay_op(xmlNode * request) { diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c index dc67947..843b3d4 100644 --- a/daemons/fenced/fenced_remote.c +++ b/daemons/fenced/fenced_remote.c @@ -292,7 +292,7 @@ init_stonith_remote_op_hash_table(GHashTable **table) static const char * op_requested_action(const remote_fencing_op_t *op) { - return ((op->phase > st_phase_requested)? "reboot" : op->action); + return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action); } /*! @@ -311,7 +311,7 @@ op_phase_off(remote_fencing_op_t *op) /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the * memory allocation at each phase. */ - strcpy(op->action, "off"); + strcpy(op->action, PCMK_ACTION_OFF); } /*! 
@@ -329,7 +329,7 @@ op_phase_on(remote_fencing_op_t *op) "remapping to 'on' for %s " CRM_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_on; - strcpy(op->action, "on"); + strcpy(op->action, PCMK_ACTION_ON); /* Skip devices with automatic unfencing, because the cluster will handle it * when the node rejoins. @@ -362,7 +362,7 @@ undo_op_remap(remote_fencing_op_t *op) crm_info("Undoing remap of reboot targeting %s for %s " CRM_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_requested; - strcpy(op->action, "reboot"); + strcpy(op->action, PCMK_ACTION_REBOOT); } } @@ -673,8 +673,8 @@ remote_op_timeout_one(gpointer userdata) "Peer did not return fence result within timeout"); // The requested delay has been applied for the first device - if (op->delay > 0) { - op->delay = 0; + if (op->client_delay > 0) { + op->client_delay = 0; crm_trace("Try another device for '%s' action targeting %s " "for client %s without delay " CRM_XS " id=%.8s", op->action, op->target, op->client_name, op->id); @@ -961,12 +961,12 @@ advance_topology_level(remote_fencing_op_t *op, bool empty_ok) set_op_device_list(op, tp->levels[op->level]); // The requested delay has been applied for the first fencing level - if (op->level > 1 && op->delay > 0) { - op->delay = 0; + if ((op->level > 1) && (op->client_delay > 0)) { + op->client_delay = 0; } if ((g_list_next(op->devices_list) != NULL) - && pcmk__str_eq(op->action, "reboot", pcmk__str_none)) { + && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A reboot has been requested for a topology level with multiple * devices. Instead of rebooting the devices sequentially, we will * turn them all off, then turn them all on again. (Think about @@ -1163,7 +1163,7 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout)); // Value -1 means disable any static/random fencing delays - crm_element_value_int(request, F_STONITH_DELAY, &(op->delay)); + crm_element_value_int(request, F_STONITH_DELAY, &(op->client_delay)); if (peer && dev) { op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID); @@ -1474,8 +1474,8 @@ get_device_timeout(const remote_fencing_op_t *op, return op->base_timeout; } - // op->delay < 0 means disable any static/random fencing delays - if (with_delay && op->delay >= 0) { + // op->client_delay < 0 means disable any static/random fencing delays + if (with_delay && (op->client_delay >= 0)) { // delay_base is eventually limited by delay_max delay = (props->delay_max[op->phase] > 0 ? props->delay_max[op->phase] : props->delay_base[op->phase]); @@ -1541,7 +1541,7 @@ get_op_total_timeout(const remote_fencing_op_t *op, GList *iter = NULL; GList *auto_list = NULL; - if (pcmk__str_eq(op->action, "on", pcmk__str_none) + if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none) && (op->automatic_list != NULL)) { auto_list = g_list_copy(op->automatic_list); } @@ -1620,7 +1620,7 @@ get_op_total_timeout(const remote_fencing_op_t *op, * up the total timeout. */ return ((total_timeout ? total_timeout : op->base_timeout) - + (op->delay > 0 ? op->delay : 0)); + + ((op->client_delay > 0)? 
op->client_delay : 0)); } static void @@ -1695,7 +1695,7 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, /* Handle automatic unfencing if an "on" action was requested */ if ((op->phase == st_phase_requested) - && pcmk__str_eq(op->action, "on", pcmk__str_none)) { + && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) { /* If the device we just executed was required, it's not anymore */ remove_required_device(op, device); @@ -1724,8 +1724,8 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, op->target, op->client_name, op->originator); // The requested delay has been applied for the first device - if (op->delay > 0) { - op->delay = 0; + if (op->client_delay > 0) { + op->client_delay = 0; } request_peer_fencing(op, NULL); @@ -1794,7 +1794,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) * node back on when we should. */ device = op->devices->data; - if (pcmk__str_eq(fenced_device_reboot_action(device), "off", + if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF, pcmk__str_none)) { crm_info("Not turning %s back on using %s because the device is " "configured to stay off (pcmk_reboot_action='off')", @@ -1844,13 +1844,16 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) } if (peer) { - /* Take any requested fencing delay into account to prevent it from eating - * up the timeout. - */ - int timeout_one = (op->delay > 0 ? - TIMEOUT_MULTIPLY_FACTOR * op->delay : 0); + int timeout_one = 0; xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0); + if (op->client_delay > 0) { + /* Take requested fencing delay into account to prevent it from + * eating up the timeout. + */ + timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay; + } + crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id); crm_xml_add(remote_op, F_STONITH_TARGET, op->target); crm_xml_add(remote_op, F_STONITH_ACTION, op->action); @@ -1859,7 +1862,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name); crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout); crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options); - crm_xml_add_int(remote_op, F_STONITH_DELAY, op->delay); + crm_xml_add_int(remote_op, F_STONITH_DELAY, op->client_delay); if (device) { timeout_one += TIMEOUT_MULTIPLY_FACTOR * @@ -2097,7 +2100,7 @@ parse_action_specific(const xmlNode *xml, const char *peer, const char *device, } /* Handle devices with automatic unfencing */ - if (pcmk__str_eq(action, "on", pcmk__str_none)) { + if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { int required = 0; crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required); @@ -2160,11 +2163,11 @@ add_device_properties(const xmlNode *xml, remote_fencing_op_t *op, * values for "off" and "on" in child elements, just in case the reboot * winds up getting remapped. 
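The client_delay rename in the hunks above also makes the delay semantics easier to follow: a positive value requested by the API client (the controller uses this for priority-fencing-delay) is added on top of the per-device timeout so it cannot eat into it, a negative value disables any configured static/random delays, and the value is zeroed once the first device or topology level has applied it. A rough, self-contained illustration of the timeout padding; the multiplier value is an assumption standing in for whatever TIMEOUT_MULTIPLY_FACTOR is defined as:

    #include <stdio.h>

    #define FACTOR 1.2   /* assumed for illustration; the fencer uses TIMEOUT_MULTIPLY_FACTOR */

    /* Sketch of the request_peer_fencing() arithmetic: pad the per-device
     * timeout with any client-requested delay rather than letting the delay
     * consume part of it. */
    static int peer_timeout(int device_timeout, int client_delay)
    {
        int timeout_one = 0;

        if (client_delay > 0) {
            timeout_one = (int) (FACTOR * client_delay);
        }
        timeout_one += (int) (FACTOR * device_timeout);
        return timeout_one;
    }

    int main(void)
    {
        /* Hypothetical numbers: 60s device timeout, 5s requested delay */
        printf("no delay:        %ds\n", peer_timeout(60, 0));    /* 72 */
        printf("with delay:      %ds\n", peer_timeout(60, 5));    /* 78 */
        printf("delays disabled: %ds\n", peer_timeout(60, -1));   /* 72 */
        return 0;
    }
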
*/ - if (pcmk__str_eq(ID(child), "off", pcmk__str_none)) { - parse_action_specific(child, peer->host, device, "off", + if (pcmk__str_eq(ID(child), PCMK_ACTION_OFF, pcmk__str_none)) { + parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF, op, st_phase_off, props); - } else if (pcmk__str_eq(ID(child), "on", pcmk__str_none)) { - parse_action_specific(child, peer->host, device, "on", + } else if (pcmk__str_eq(ID(child), PCMK_ACTION_ON, pcmk__str_none)) { + parse_action_specific(child, peer->host, device, PCMK_ACTION_ON, op, st_phase_on, props); } } diff --git a/daemons/fenced/fenced_scheduler.c b/daemons/fenced/fenced_scheduler.c new file mode 100644 index 0000000..27d990f --- /dev/null +++ b/daemons/fenced/fenced_scheduler.c @@ -0,0 +1,225 @@ +/* + * Copyright 2009-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. +*/ + +#include + +#include +#include +#include + +#include +#include + +#include +#include + +static pcmk_scheduler_t *scheduler = NULL; + +/*! + * \internal + * \brief Initialize scheduler data for fencer purposes + * + * \return Standard Pacemaker return code + */ +int +fenced_scheduler_init(void) +{ + pcmk__output_t *logger = NULL; + int rc = pcmk__log_output_new(&logger); + + if (rc != pcmk_rc_ok) { + return rc; + } + + scheduler = pe_new_working_set(); + if (scheduler == NULL) { + pcmk__output_free(logger); + return ENOMEM; + } + + pe__register_messages(logger); + pcmk__register_lib_messages(logger); + pcmk__output_set_log_level(logger, LOG_TRACE); + scheduler->priv = logger; + + return pcmk_rc_ok; +} + +/*! + * \internal + * \brief Free all scheduler-related resources + */ +void +fenced_scheduler_cleanup(void) +{ + if (scheduler != NULL) { + pcmk__output_t *logger = scheduler->priv; + + if (logger != NULL) { + logger->finish(logger, CRM_EX_OK, true, NULL); + pcmk__output_free(logger); + scheduler->priv = NULL; + } + pe_free_working_set(scheduler); + scheduler = NULL; + } +} + +/*! + * \internal + * \brief Check whether the local node is in a resource's allowed node list + * + * \param[in] rsc Resource to check + * + * \return Pointer to node if found, otherwise NULL + */ +static pcmk_node_t * +local_node_allowed_for(const pcmk_resource_t *rsc) +{ + if ((rsc != NULL) && (stonith_our_uname != NULL)) { + GHashTableIter iter; + pcmk_node_t *node = NULL; + + g_hash_table_iter_init(&iter, rsc->allowed_nodes); + while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { + if (pcmk__str_eq(node->details->uname, stonith_our_uname, + pcmk__str_casei)) { + return node; + } + } + } + return NULL; +} + +/*! 
+ * \internal + * \brief If a given resource or any of its children are fencing devices, + * register the devices + * + * \param[in,out] data Resource to check + * \param[in,out] user_data Ignored + */ +static void +register_if_fencing_device(gpointer data, gpointer user_data) +{ + pcmk_resource_t *rsc = data; + + xmlNode *xml = NULL; + GHashTableIter hash_iter; + pcmk_node_t *node = NULL; + const char *name = NULL; + const char *value = NULL; + const char *rclass = NULL; + const char *agent = NULL; + const char *rsc_provides = NULL; + stonith_key_value_t *params = NULL; + + // If this is a collective resource, check children instead + if (rsc->children != NULL) { + for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { + register_if_fencing_device(iter->data, NULL); + if (pe_rsc_is_clone(rsc)) { + return; // Only one instance needs to be checked for clones + } + } + return; + } + + rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); + if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { + return; // Not a fencing device + } + + if (pe__resource_is_disabled(rsc)) { + crm_info("Ignoring fencing device %s because it is disabled", rsc->id); + return; + } + + if ((stonith_watchdog_timeout_ms <= 0) && + pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { + crm_info("Ignoring fencing device %s " + "because watchdog fencing is disabled", rsc->id); + return; + } + + // Check whether local node is allowed to run resource + node = local_node_allowed_for(rsc); + if (node == NULL) { + crm_info("Ignoring fencing device %s " + "because local node is not allowed to run it", rsc->id); + return; + } + if (node->weight < 0) { + crm_info("Ignoring fencing device %s " + "because local node has preference %s for it", + rsc->id, pcmk_readable_score(node->weight)); + return; + } + + // If device is in a group, check whether local node is allowed for group + if ((rsc->parent != NULL) + && (rsc->parent->variant == pcmk_rsc_variant_group)) { + pcmk_node_t *group_node = local_node_allowed_for(rsc->parent); + + if ((group_node != NULL) && (group_node->weight < 0)) { + crm_info("Ignoring fencing device %s " + "because local node has preference %s for its group", + rsc->id, pcmk_readable_score(group_node->weight)); + return; + } + } + + crm_debug("Reloading configuration of fencing device %s", rsc->id); + + agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE); + + get_meta_attributes(rsc->meta, rsc, node, scheduler); + rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); + + g_hash_table_iter_init(&hash_iter, pe_rsc_params(rsc, node, scheduler)); + while (g_hash_table_iter_next(&hash_iter, (gpointer *) &name, + (gpointer *) &value)) { + if ((name == NULL) || (value == NULL)) { + continue; + } + params = stonith_key_value_add(params, name, value); + } + + xml = create_device_registration_xml(pcmk__s(rsc->clone_name, rsc->id), + st_namespace_any, agent, params, + rsc_provides); + stonith_key_value_freeall(params, 1, 1); + CRM_ASSERT(stonith_device_register(xml, TRUE) == pcmk_ok); + free_xml(xml); +} + +/*! 
+ * \internal + * \brief Run the scheduler for fencer purposes + * + * \param[in] cib Cluster's current CIB + */ +void +fenced_scheduler_run(xmlNode *cib) +{ + CRM_CHECK((cib != NULL) && (scheduler != NULL), return); + + if (scheduler->now != NULL) { + crm_time_free(scheduler->now); + scheduler->now = NULL; + } + scheduler->localhost = stonith_our_uname; + pcmk__schedule_actions(cib, pcmk_sched_location_only + |pcmk_sched_no_compat + |pcmk_sched_no_counts, scheduler); + g_list_foreach(scheduler->resources, register_if_fencing_device, NULL); + + scheduler->input = NULL; // Wasn't a copy, so don't let API free it + pe_reset_working_set(scheduler); +} diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c index 4edda6c..7c69fb8 100644 --- a/daemons/fenced/pacemaker-fenced.c +++ b/daemons/fenced/pacemaker-fenced.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -37,8 +36,6 @@ #include #include -#include -#include #include @@ -51,18 +48,9 @@ GList *stonith_watchdog_targets = NULL; static GMainLoop *mainloop = NULL; gboolean stand_alone = FALSE; -static gboolean stonith_shutdown_flag = FALSE; +gboolean stonith_shutdown_flag = FALSE; static qb_ipcs_service_t *ipcs = NULL; -static xmlNode *local_cib = NULL; -static pe_working_set_t *fenced_data_set = NULL; -static const unsigned long long data_set_flags = pe_flag_quick_location - | pe_flag_no_compat - | pe_flag_no_counts; - -static cib_t *cib_api = NULL; - -static pcmk__output_t *logger_out = NULL; static pcmk__output_t *out = NULL; pcmk__supported_format_t formats[] = { @@ -77,9 +65,8 @@ static struct { gchar **log_files; } options; -static crm_exit_t exit_code = CRM_EX_OK; +crm_exit_t exit_code = CRM_EX_OK; -static void stonith_shutdown(int nsig); static void stonith_cleanup(void); static int32_t @@ -241,7 +228,8 @@ stonith_peer_cs_destroy(gpointer user_data) #endif void -do_local_reply(xmlNode *notify_src, pcmk__client_t *client, int call_options) +do_local_reply(const xmlNode *notify_src, pcmk__client_t *client, + int call_options) { /* send callback to originating child */ int local_rc = pcmk_rc_ok; @@ -292,7 +280,7 @@ static void stonith_notify_client(gpointer key, gpointer value, gpointer user_data) { - xmlNode *update_msg = user_data; + const xmlNode *update_msg = user_data; pcmk__client_t *client = value; const char *type = NULL; @@ -443,589 +431,6 @@ fenced_send_level_notification(const char *op, send_config_notification(op, result, desc, g_hash_table_size(topology)); } -static void -topology_remove_helper(const char *node, int level) -{ - char *desc = NULL; - pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; - xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); - - crm_xml_add(data, F_STONITH_ORIGIN, __func__); - crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); - crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); - - fenced_unregister_level(data, &desc, &result); - fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc); - pcmk__reset_result(&result); - free_xml(data); - free(desc); -} - -static void -remove_cib_device(xmlXPathObjectPtr xpathObj) -{ - int max = numXpathResults(xpathObj), lpc = 0; - - for (lpc = 0; lpc < max; lpc++) { - const char *rsc_id = NULL; - const char *standard = NULL; - xmlNode *match = getXpathResult(xpathObj, lpc); - - CRM_LOG_ASSERT(match != NULL); - if(match != NULL) { - standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); - } - - if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { - continue; 
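Taken together, the new daemons/fenced/fenced_scheduler.c above concentrates the scheduler handling that the removed pacemaker-fenced.c hunks below used to carry inline. A minimal sketch of the intended call order, using only the entry points this patch declares in pacemaker-fenced.h; the include line is an assumption and error handling is trimmed:

    #include "pacemaker-fenced.h"   /* assumed: declares the fenced_scheduler_* entry points */

    /* Sketch only: how the fencer is expected to drive the new helpers */
    static void scheduler_lifecycle_sketch(xmlNode *cib)
    {
        /* Once at startup (main() now fails with CRM_EX_FATAL if this does
         * not return pcmk_rc_ok) */
        if (fenced_scheduler_init() != pcmk_rc_ok) {
            return;
        }

        /* On each CIB load/refresh: place resources for the local node and
         * (re)register whichever fencing devices it may run */
        fenced_scheduler_run(cib);

        /* Once at shutdown, in place of the old pe_free_working_set() call */
        fenced_scheduler_cleanup();
    }
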
- } - - rsc_id = crm_element_value(match, XML_ATTR_ID); - - stonith_device_remove(rsc_id, true); - } -} - -static void -remove_topology_level(xmlNode *match) -{ - int index = 0; - char *key = NULL; - - CRM_CHECK(match != NULL, return); - - key = stonith_level_key(match, fenced_target_by_unknown); - crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); - topology_remove_helper(key, index); - free(key); -} - -static void -add_topology_level(xmlNode *match) -{ - char *desc = NULL; - pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; - - CRM_CHECK(match != NULL, return); - - fenced_register_level(match, &desc, &result); - fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc); - pcmk__reset_result(&result); - free(desc); -} - -static void -remove_fencing_topology(xmlXPathObjectPtr xpathObj) -{ - int max = numXpathResults(xpathObj), lpc = 0; - - for (lpc = 0; lpc < max; lpc++) { - xmlNode *match = getXpathResult(xpathObj, lpc); - - CRM_LOG_ASSERT(match != NULL); - if (match && crm_element_value(match, XML_DIFF_MARKER)) { - /* Deletion */ - int index = 0; - char *target = stonith_level_key(match, fenced_target_by_unknown); - - crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); - if (target == NULL) { - crm_err("Invalid fencing target in element %s", ID(match)); - - } else if (index <= 0) { - crm_err("Invalid level for %s in element %s", target, ID(match)); - - } else { - topology_remove_helper(target, index); - } - /* } else { Deal with modifications during the 'addition' stage */ - } - } -} - -static void -register_fencing_topology(xmlXPathObjectPtr xpathObj) -{ - int max = numXpathResults(xpathObj), lpc = 0; - - for (lpc = 0; lpc < max; lpc++) { - xmlNode *match = getXpathResult(xpathObj, lpc); - - remove_topology_level(match); - add_topology_level(match); - } -} - -/* Fencing - - - - - - - - - - - - - - - - -*/ - -static void -fencing_topology_init(void) -{ - xmlXPathObjectPtr xpathObj = NULL; - const char *xpath = "//" XML_TAG_FENCING_LEVEL; - - crm_trace("Full topology refresh"); - free_topology_list(); - init_topology_list(); - - /* Grab everything */ - xpathObj = xpath_search(local_cib, xpath); - register_fencing_topology(xpathObj); - - freeXpathObject(xpathObj); -} - -#define rsc_name(x) x->clone_name?x->clone_name:x->id - -/*! 
- * \internal - * \brief Check whether our uname is in a resource's allowed node list - * - * \param[in] rsc Resource to check - * - * \return Pointer to node object if found, NULL otherwise - */ -static pe_node_t * -our_node_allowed_for(const pe_resource_t *rsc) -{ - GHashTableIter iter; - pe_node_t *node = NULL; - - if (rsc && stonith_our_uname) { - g_hash_table_iter_init(&iter, rsc->allowed_nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { - if (node && strcmp(node->details->uname, stonith_our_uname) == 0) { - break; - } - node = NULL; - } - } - return node; -} - -static void -watchdog_device_update(void) -{ - if (stonith_watchdog_timeout_ms > 0) { - if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && - !stonith_watchdog_targets) { - /* getting here watchdog-fencing enabled, no device there yet - and reason isn't stonith_watchdog_targets preventing that - */ - int rc; - xmlNode *xml; - - xml = create_device_registration_xml( - STONITH_WATCHDOG_ID, - st_namespace_internal, - STONITH_WATCHDOG_AGENT, - NULL, /* stonith_device_register will add our - own name as PCMK_STONITH_HOST_LIST param - so we can skip that here - */ - NULL); - rc = stonith_device_register(xml, TRUE); - free_xml(xml); - if (rc != pcmk_ok) { - rc = pcmk_legacy2rc(rc); - exit_code = CRM_EX_FATAL; - crm_crit("Cannot register watchdog pseudo fence agent: %s", - pcmk_rc_str(rc)); - stonith_shutdown(0); - } - } - - } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) { - /* be silent if no device - todo parameter to stonith_device_remove */ - stonith_device_remove(STONITH_WATCHDOG_ID, true); - } -} - -static void -update_stonith_watchdog_timeout_ms(xmlNode *cib) -{ - long timeout_ms = 0; - xmlNode *stonith_watchdog_xml = NULL; - const char *value = NULL; - - stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", - cib, LOG_NEVER); - if (stonith_watchdog_xml) { - value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); - } - if (value) { - timeout_ms = crm_get_msec(value); - } - - if (timeout_ms < 0) { - timeout_ms = pcmk__auto_watchdog_timeout(); - } - - stonith_watchdog_timeout_ms = timeout_ms; -} - -/*! - * \internal - * \brief If a resource or any of its children are STONITH devices, update their - * definitions given a cluster working set. - * - * \param[in,out] rsc Resource to check - * \param[in,out] data_set Cluster working set with device information - */ -static void -cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) -{ - pe_node_t *node = NULL; - const char *value = NULL; - const char *rclass = NULL; - pe_node_t *parent = NULL; - - /* If this is a complex resource, check children rather than this resource itself. */ - if(rsc->children) { - GList *gIter = NULL; - for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { - cib_device_update(gIter->data, data_set); - if(pe_rsc_is_clone(rsc)) { - crm_trace("Only processing one copy of the clone %s", rsc->id); - break; - } - } - return; - } - - /* We only care about STONITH resources. */ - rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { - return; - } - - /* If this STONITH resource is disabled, remove it. 
*/ - if (pe__resource_is_disabled(rsc)) { - crm_info("Device %s has been disabled", rsc->id); - return; - } - - /* if watchdog-fencing is disabled handle any watchdog-fence - resource as if it was disabled - */ - if ((stonith_watchdog_timeout_ms <= 0) && - pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { - crm_info("Watchdog-fencing disabled thus handling " - "device %s as disabled", rsc->id); - return; - } - - /* Check whether our node is allowed for this resource (and its parent if in a group) */ - node = our_node_allowed_for(rsc); - if (rsc->parent && (rsc->parent->variant == pe_group)) { - parent = our_node_allowed_for(rsc->parent); - } - - if(node == NULL) { - /* Our node is disallowed, so remove the device */ - GHashTableIter iter; - - crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname); - g_hash_table_iter_init(&iter, rsc->allowed_nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { - crm_trace("Available: %s = %d", pe__node_name(node), node->weight); - } - - return; - - } else if(node->weight < 0 || (parent && parent->weight < 0)) { - /* Our node (or its group) is disallowed by score, so remove the device */ - int score = (node->weight < 0)? node->weight : parent->weight; - - crm_info("Device %s has been disabled on %s: score=%s", - rsc->id, stonith_our_uname, pcmk_readable_score(score)); - return; - - } else { - /* Our node is allowed, so update the device information */ - int rc; - xmlNode *data; - GHashTable *rsc_params = NULL; - GHashTableIter gIter; - stonith_key_value_t *params = NULL; - - const char *name = NULL; - const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE); - const char *rsc_provides = NULL; - - crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight); - rsc_params = pe_rsc_params(rsc, node, data_set); - get_meta_attributes(rsc->meta, rsc, node, data_set); - - rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); - - g_hash_table_iter_init(&gIter, rsc_params); - while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) { - if (!name || !value) { - continue; - } - params = stonith_key_value_add(params, name, value); - crm_trace(" %s=%s", name, value); - } - - data = create_device_registration_xml(rsc_name(rsc), st_namespace_any, - agent, params, rsc_provides); - stonith_key_value_freeall(params, 1, 1); - rc = stonith_device_register(data, TRUE); - CRM_ASSERT(rc == pcmk_ok); - free_xml(data); - } -} - -/*! 
- * \internal - * \brief Update all STONITH device definitions based on current CIB - */ -static void -cib_devices_update(void) -{ - GHashTableIter iter; - stonith_device_t *device = NULL; - - crm_info("Updating devices to version %s.%s.%s", - crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), - crm_element_value(local_cib, XML_ATTR_GENERATION), - crm_element_value(local_cib, XML_ATTR_NUMUPDATES)); - - if (fenced_data_set->now != NULL) { - crm_time_free(fenced_data_set->now); - fenced_data_set->now = NULL; - } - fenced_data_set->localhost = stonith_our_uname; - pcmk__schedule_actions(local_cib, data_set_flags, fenced_data_set); - - g_hash_table_iter_init(&iter, device_list); - while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { - if (device->cib_registered) { - device->dirty = TRUE; - } - } - - /* have list repopulated if cib has a watchdog-fencing-resource - TODO: keep a cached list for queries happening while we are refreshing - */ - g_list_free_full(stonith_watchdog_targets, free); - stonith_watchdog_targets = NULL; - g_list_foreach(fenced_data_set->resources, (GFunc) cib_device_update, fenced_data_set); - - g_hash_table_iter_init(&iter, device_list); - while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { - if (device->dirty) { - g_hash_table_iter_remove(&iter); - } - } - - fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it - pe_reset_working_set(fenced_data_set); -} - -static void -update_cib_stonith_devices_v2(const char *event, xmlNode * msg) -{ - xmlNode *change = NULL; - char *reason = NULL; - bool needs_update = FALSE; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); - - for (change = pcmk__xml_first_child(patchset); change != NULL; - change = pcmk__xml_next(change)) { - const char *op = crm_element_value(change, XML_DIFF_OP); - const char *xpath = crm_element_value(change, XML_DIFF_PATH); - const char *shortpath = NULL; - - if ((op == NULL) || - (strcmp(op, "move") == 0) || - strstr(xpath, "/"XML_CIB_TAG_STATUS)) { - continue; - } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) { - const char *rsc_id = NULL; - char *search = NULL; - char *mutable = NULL; - - if (strstr(xpath, XML_TAG_ATTR_SETS) || - strstr(xpath, XML_TAG_META_SETS)) { - needs_update = TRUE; - pcmk__str_update(&reason, - "(meta) attribute deleted from resource"); - break; - } - pcmk__str_update(&mutable, xpath); - rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'"); - if (rsc_id != NULL) { - rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'"); - search = strchr(rsc_id, '\''); - } - if (search != NULL) { - *search = 0; - stonith_device_remove(rsc_id, true); - /* watchdog_device_update called afterwards - to fall back to implicit definition if needed */ - } else { - crm_warn("Ignoring malformed CIB update (resource deletion)"); - } - free(mutable); - - } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) || - strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) || - strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) { - shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath); - reason = crm_strdup_printf("%s %s", op, shortpath+1); - needs_update = TRUE; - break; - } - } - - if(needs_update) { - crm_info("Updating device list from CIB: %s", reason); - cib_devices_update(); - } else { - crm_trace("No updates for device list found in CIB"); - } - free(reason); -} - - -static void -update_cib_stonith_devices_v1(const char *event, xmlNode * msg) -{ - const char *reason = "none"; - gboolean needs_update = FALSE; - 
xmlXPathObjectPtr xpath_obj = NULL; - - /* process new constraints */ - xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION); - if (numXpathResults(xpath_obj) > 0) { - int max = numXpathResults(xpath_obj), lpc = 0; - - /* Safest and simplest to always recompute */ - needs_update = TRUE; - reason = "new location constraint"; - - for (lpc = 0; lpc < max; lpc++) { - xmlNode *match = getXpathResult(xpath_obj, lpc); - - crm_log_xml_trace(match, "new constraint"); - } - } - freeXpathObject(xpath_obj); - - /* process deletions */ - xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE); - if (numXpathResults(xpath_obj) > 0) { - remove_cib_device(xpath_obj); - } - freeXpathObject(xpath_obj); - - /* process additions */ - xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE); - if (numXpathResults(xpath_obj) > 0) { - int max = numXpathResults(xpath_obj), lpc = 0; - - for (lpc = 0; lpc < max; lpc++) { - const char *rsc_id = NULL; - const char *standard = NULL; - xmlNode *match = getXpathResult(xpath_obj, lpc); - - rsc_id = crm_element_value(match, XML_ATTR_ID); - standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); - - if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { - continue; - } - - crm_trace("Fencing resource %s was added or modified", rsc_id); - reason = "new resource"; - needs_update = TRUE; - } - } - freeXpathObject(xpath_obj); - - if(needs_update) { - crm_info("Updating device list from CIB: %s", reason); - cib_devices_update(); - } -} - -static void -update_cib_stonith_devices(const char *event, xmlNode * msg) -{ - int format = 1; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); - - CRM_ASSERT(patchset); - crm_element_value_int(patchset, "format", &format); - switch(format) { - case 1: - update_cib_stonith_devices_v1(event, msg); - break; - case 2: - update_cib_stonith_devices_v2(event, msg); - break; - default: - crm_warn("Unknown patch format: %d", format); - } -} - -/*! - * \internal - * \brief Check whether a node has a specific attribute name/value - * - * \param[in] node Name of node to check - * \param[in] name Name of an attribute to look for - * \param[in] value The value the named attribute needs to be set to in order to be considered a match - * - * \return TRUE if the locally cached CIB has the specified node attribute - */ -gboolean -node_has_attr(const char *node, const char *name, const char *value) -{ - GString *xpath = NULL; - xmlNode *match; - - CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL) - && (value != NULL), return FALSE); - - /* Search for the node's attributes in the CIB. While the schema allows - * multiple sets of instance attributes, and allows instance attributes to - * use id-ref to reference values elsewhere, that is intended for resources, - * so we ignore that here. - */ - xpath = g_string_sized_new(256); - pcmk__g_strcat(xpath, - "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE - "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS - "/" XML_CIB_TAG_NVPAIR - "[@" XML_NVPAIR_ATTR_NAME "='", name, "' " - "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL); - - match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER); - - g_string_free(xpath, TRUE); - return (match != NULL); -} - /*! 
* \internal * \brief Check whether a node does watchdog-fencing @@ -1043,201 +448,7 @@ node_does_watchdog_fencing(const char *node) pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei)); } - -static void -update_fencing_topology(const char *event, xmlNode * msg) -{ - int format = 1; - const char *xpath; - xmlXPathObjectPtr xpathObj = NULL; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); - - CRM_ASSERT(patchset); - crm_element_value_int(patchset, "format", &format); - - if(format == 1) { - /* Process deletions (only) */ - xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; - xpathObj = xpath_search(msg, xpath); - - remove_fencing_topology(xpathObj); - freeXpathObject(xpathObj); - - /* Process additions and changes */ - xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; - xpathObj = xpath_search(msg, xpath); - - register_fencing_topology(xpathObj); - freeXpathObject(xpathObj); - - } else if(format == 2) { - xmlNode *change = NULL; - int add[] = { 0, 0, 0 }; - int del[] = { 0, 0, 0 }; - - xml_patch_versions(patchset, add, del); - - for (change = pcmk__xml_first_child(patchset); change != NULL; - change = pcmk__xml_next(change)) { - const char *op = crm_element_value(change, XML_DIFF_OP); - const char *xpath = crm_element_value(change, XML_DIFF_PATH); - - if(op == NULL) { - continue; - - } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) { - /* Change to a specific entry */ - - crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); - if(strcmp(op, "move") == 0) { - continue; - - } else if(strcmp(op, "create") == 0) { - add_topology_level(change->children); - - } else if(strcmp(op, "modify") == 0) { - xmlNode *match = first_named_child(change, XML_DIFF_RESULT); - - if(match) { - remove_topology_level(match->children); - add_topology_level(match->children); - } - - } else if(strcmp(op, "delete") == 0) { - /* Nuclear option, all we have is the path and an id... 
not enough to remove a specific entry */ - crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", - op, add[0], add[1], add[2], xpath); - fencing_topology_init(); - return; - } - - } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) { - /* Change to the topology in general */ - crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", - op, add[0], add[1], add[2], xpath); - fencing_topology_init(); - return; - - } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { - /* Changes to the whole config section, possibly including the topology as a whild */ - if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) { - crm_trace("Nothing for us in %s operation %d.%d.%d for %s.", - op, add[0], add[1], add[2], xpath); - - } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { - crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", - op, add[0], add[1], add[2], xpath); - fencing_topology_init(); - return; - } - - } else { - crm_trace("Nothing for us in %s operation %d.%d.%d for %s", - op, add[0], add[1], add[2], xpath); - } - } - - } else { - crm_warn("Unknown patch format: %d", format); - } -} -static bool have_cib_devices = FALSE; - -static void -update_cib_cache_cb(const char *event, xmlNode * msg) -{ - int rc = pcmk_ok; - long timeout_ms_saved = stonith_watchdog_timeout_ms; - bool need_full_refresh = false; - - if(!have_cib_devices) { - crm_trace("Skipping updates until we get a full dump"); - return; - - } else if(msg == NULL) { - crm_trace("Missing %s update", event); - return; - } - - /* Maintain a local copy of the CIB so that we have full access - * to device definitions, location constraints, and node attributes - */ - if (local_cib != NULL) { - int rc = pcmk_ok; - xmlNode *patchset = NULL; - - crm_element_value_int(msg, F_CIB_RC, &rc); - if (rc != pcmk_ok) { - return; - } - - patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); - pcmk__output_set_log_level(logger_out, LOG_TRACE); - out->message(out, "xml-patchset", patchset); - rc = xml_apply_patchset(local_cib, patchset, TRUE); - switch (rc) { - case pcmk_ok: - case -pcmk_err_old_data: - break; - case -pcmk_err_diff_resync: - case -pcmk_err_diff_failed: - crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); - free_xml(local_cib); - local_cib = NULL; - break; - default: - crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); - free_xml(local_cib); - local_cib = NULL; - } - } - - if (local_cib == NULL) { - crm_trace("Re-requesting full CIB"); - rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call); - if(rc != pcmk_ok) { - crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc); - return; - } - CRM_ASSERT(local_cib != NULL); - need_full_refresh = true; - } - - pcmk__refresh_node_caches_from_cib(local_cib); - update_stonith_watchdog_timeout_ms(local_cib); - - if (timeout_ms_saved != stonith_watchdog_timeout_ms) { - need_full_refresh = true; - } - - if (need_full_refresh) { - fencing_topology_init(); - cib_devices_update(); - } else { - // Partial refresh - update_fencing_topology(event, msg); - update_cib_stonith_devices(event, msg); - } - - watchdog_device_update(); -} - -static void -init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) -{ - crm_info("Updating device list from CIB"); - have_cib_devices = TRUE; - local_cib = copy_xml(output); - - pcmk__refresh_node_caches_from_cib(local_cib); - 
update_stonith_watchdog_timeout_ms(local_cib); - - fencing_topology_init(); - cib_devices_update(); - watchdog_device_update(); -} - -static void +void stonith_shutdown(int nsig) { crm_info("Terminating with %d clients", pcmk__ipc_client_count()); @@ -1247,29 +458,10 @@ stonith_shutdown(int nsig) } } -static void -cib_connection_destroy(gpointer user_data) -{ - if (stonith_shutdown_flag) { - crm_info("Connection to the CIB manager closed"); - return; - } else { - crm_crit("Lost connection to the CIB manager, shutting down"); - } - if (cib_api) { - cib_api->cmds->signoff(cib_api); - } - stonith_shutdown(0); -} - static void stonith_cleanup(void) { - if (cib_api) { - cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb); - cib_api->cmds->signoff(cib_api); - } - + fenced_cib_cleanup(); if (ipcs) { qb_ipcs_destroy(ipcs); } @@ -1284,9 +476,6 @@ stonith_cleanup(void) free(stonith_our_uname); stonith_our_uname = NULL; - - free_xml(local_cib); - local_cib = NULL; } static gboolean @@ -1298,38 +487,6 @@ stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data, return TRUE; } -static void -setup_cib(void) -{ - int rc, retries = 0; - - cib_api = cib_new(); - if (cib_api == NULL) { - crm_err("No connection to the CIB manager"); - return; - } - - do { - sleep(retries); - rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command); - } while (rc == -ENOTCONN && ++retries < 5); - - if (rc != pcmk_ok) { - crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); - - } else if (pcmk_ok != - cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) { - crm_err("Could not set CIB notification callback"); - - } else { - rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); - cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", - init_cib_cache_cb); - cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); - crm_info("Watching for fencing topology changes"); - } -} - struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = st_ipc_accept, .connection_created = NULL, @@ -1435,10 +592,11 @@ static pcmk__cluster_option_t fencer_options[] = { "Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.") }, { - "pcmk_reboot_action",NULL, "string", NULL, "reboot", NULL, - N_("Advanced use only: An alternate command to run instead of 'reboot'"), + "pcmk_reboot_action", NULL, "string", NULL, + PCMK_ACTION_REBOOT, NULL, + N_("Advanced use only: An alternate command to run instead of 'reboot'"), N_("Some devices do not support the standard commands or may provide additional ones.\n" - "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.") + "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.") }, { "pcmk_reboot_timeout",NULL, "time", NULL, "60s", NULL, @@ -1454,10 +612,11 @@ static pcmk__cluster_option_t fencer_options[] = { " Use this option to alter the number of times Pacemaker retries \'reboot\' actions before giving up.") }, { - "pcmk_off_action",NULL, "string", NULL, "off", NULL, - N_("Advanced use only: An alternate command to run instead of \'off\'"), + "pcmk_off_action", NULL, "string", NULL, + PCMK_ACTION_OFF, NULL, + N_("Advanced use only: An alternate command to run instead of \'off\'"), N_("Some devices do not support the standard commands or may provide additional ones." 
- "Use this to specify an alternate, device-specific, command that implements the \'off\' action.") + "Use this to specify an alternate, device-specific, command that implements the \'off\' action.") }, { "pcmk_off_timeout",NULL, "time", NULL, "60s", NULL, @@ -1473,10 +632,11 @@ static pcmk__cluster_option_t fencer_options[] = { " Use this option to alter the number of times Pacemaker retries \'off\' actions before giving up.") }, { - "pcmk_on_action",NULL, "string", NULL, "on", NULL, - N_("Advanced use only: An alternate command to run instead of 'on'"), + "pcmk_on_action", NULL, "string", NULL, + PCMK_ACTION_ON, NULL, + N_("Advanced use only: An alternate command to run instead of 'on'"), N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'on\' action.") + "Use this to specify an alternate, device-specific, command that implements the \'on\' action.") }, { "pcmk_on_timeout",NULL, "time", NULL, "60s", NULL, @@ -1492,10 +652,11 @@ static pcmk__cluster_option_t fencer_options[] = { " Use this option to alter the number of times Pacemaker retries \'on\' actions before giving up.") }, { - "pcmk_list_action",NULL, "string", NULL, "list", NULL, - N_("Advanced use only: An alternate command to run instead of \'list\'"), + "pcmk_list_action",NULL, "string", NULL, + PCMK_ACTION_LIST, NULL, + N_("Advanced use only: An alternate command to run instead of \'list\'"), N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'list\' action.") + "Use this to specify an alternate, device-specific, command that implements the \'list\' action.") }, { "pcmk_list_timeout",NULL, "time", NULL, "60s", NULL, @@ -1511,7 +672,8 @@ static pcmk__cluster_option_t fencer_options[] = { " Use this option to alter the number of times Pacemaker retries \'list\' actions before giving up.") }, { - "pcmk_monitor_action",NULL, "string", NULL, "monitor", NULL, + "pcmk_monitor_action", NULL, "string", NULL, + PCMK_ACTION_MONITOR, NULL, N_("Advanced use only: An alternate command to run instead of \'monitor\'"), N_("Some devices do not support the standard commands or may provide additional ones." "Use this to specify an alternate, device-specific, command that implements the \'monitor\' action.") @@ -1530,10 +692,11 @@ static pcmk__cluster_option_t fencer_options[] = { " Use this option to alter the number of times Pacemaker retries \'monitor\' actions before giving up.") }, { - "pcmk_status_action",NULL, "string", NULL, "status", NULL, - N_("Advanced use only: An alternate command to run instead of \'status\'"), + "pcmk_status_action", NULL, "string", NULL, + PCMK_ACTION_STATUS, NULL, + N_("Advanced use only: An alternate command to run instead of \'status\'"), N_("Some devices do not support the standard commands or may provide additional ones." 
- "Use this to specify an alternate, device-specific, command that implements the \'status\' action.") + "Use this to specify an alternate, device-specific, command that implements the \'status\' action.") }, { "pcmk_status_timeout",NULL, "time", NULL, "60s", NULL, @@ -1568,13 +731,13 @@ fencer_metadata(void) static GOptionEntry entries[] = { { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone, - "Deprecated (will be removed in a future release)", NULL }, + N_("Deprecated (will be removed in a future release)"), NULL }, { "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, - stand_alone_cpg_cb, "Intended for use in regression testing only", NULL }, + stand_alone_cpg_cb, N_("Intended for use in regression testing only"), NULL }, { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, - &options.log_files, "Send logs to the additional named logfile", NULL }, + &options.log_files, N_("Send logs to the additional named logfile"), NULL }, { NULL } }; @@ -1649,7 +812,7 @@ main(int argc, char **argv) goto done; } - if (crm_ipc_connect(old_instance)) { + if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) { // IPC endpoint already up crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); @@ -1665,26 +828,15 @@ main(int argc, char **argv) crm_peer_init(); - fenced_data_set = pe_new_working_set(); - CRM_ASSERT(fenced_data_set != NULL); - - cluster = pcmk_cluster_new(); - - /* Initialize the logger prior to setup_cib(). update_cib_cache_cb() may - * call the "xml-patchset" message function, which needs the logger, after - * setup_cib() has run. - */ - rc = pcmk__log_output_new(&logger_out) != pcmk_rc_ok; + rc = fenced_scheduler_init(); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - "Error creating output format log: %s", pcmk_rc_str(rc)); + "Error initializing scheduler data: %s", pcmk_rc_str(rc)); goto done; } - pe__register_messages(logger_out); - pcmk__register_lib_messages(logger_out); - pcmk__output_set_log_level(logger_out, LOG_TRACE); - fenced_data_set->priv = logger_out; + + cluster = pcmk_cluster_new(); if (!stand_alone) { #if SUPPORT_COROSYNC @@ -1732,15 +884,10 @@ done: stonith_cleanup(); pcmk_cluster_free(cluster); - pe_free_working_set(fenced_data_set); + fenced_scheduler_cleanup(); pcmk__output_and_clear_error(&error, out); - if (logger_out != NULL) { - logger_out->finish(logger_out, exit_code, true, NULL); - pcmk__output_free(logger_out); - } - if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h index a3d2e17..220978a 100644 --- a/daemons/fenced/pacemaker-fenced.h +++ b/daemons/fenced/pacemaker-fenced.h @@ -6,7 +6,12 @@ */ #include // uint32_t, uint64_t +#include // xmlNode + #include +#include +#include +#include /*! * \internal @@ -104,9 +109,12 @@ typedef struct remote_fencing_op_s { * values associated with the devices this fencing operation may call */ gint total_timeout; - /*! Requested fencing delay. - * Value -1 means disable any static/random fencing delays. */ - int delay; + /*! + * Fencing delay (in seconds) requested by API client (used by controller to + * implement priority-fencing-delay). A value of -1 means disable all + * configured delays. + */ + int client_delay; /*! Delegate is the node being asked to perform a fencing action * on behalf of the node that owns the remote operation. 
Some operations @@ -205,6 +213,8 @@ typedef struct stonith_topology_s { } stonith_topology_t; +void stonith_shutdown(int nsig); + void init_device_list(void); void free_device_list(void); void init_topology_list(void); @@ -231,7 +241,7 @@ void fenced_unregister_level(xmlNode *msg, char **desc, stonith_topology_t *find_topology_for_host(const char *host); -void do_local_reply(xmlNode *notify_src, pcmk__client_t *client, +void do_local_reply(const xmlNode *notify_src, pcmk__client_t *client, int call_options); xmlNode *fenced_construct_reply(const xmlNode *request, xmlNode *data, @@ -280,6 +290,14 @@ gboolean node_has_attr(const char *node, const char *name, const char *value); gboolean node_does_watchdog_fencing(const char *node); +void fencing_topology_init(void); +void setup_cib(void); +void fenced_cib_cleanup(void); + +int fenced_scheduler_init(void); +void fenced_scheduler_cleanup(void); +void fenced_scheduler_run(xmlNode *cib); + static inline void fenced_set_protocol_error(pcmk__action_result_t *result) { @@ -299,7 +317,7 @@ fenced_set_protocol_error(pcmk__action_result_t *result) static inline uint32_t fenced_support_flag(const char *action) { - if (pcmk__str_eq(action, "on", pcmk__str_none)) { + if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { return st_device_supports_on; } return st_device_supports_none; @@ -311,5 +329,6 @@ extern GHashTable *device_list; extern GHashTable *topology; extern long stonith_watchdog_timeout_ms; extern GList *stonith_watchdog_targets; - extern GHashTable *stonith_remote_op_list; +extern crm_exit_t exit_code; +extern gboolean stonith_shutdown_flag; diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am index fc0e014..78e7c37 100644 --- a/daemons/pacemakerd/Makefile.am +++ b/daemons/pacemakerd/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2021 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -25,8 +25,10 @@ noinst_HEADERS = pacemakerd.h pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la -pacemakerd_LDADD += $(CLUSTERLIBS) +pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la +pacemakerd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +pacemakerd_LDADD += $(CLUSTERLIBS) + pacemakerd_SOURCES = pacemakerd.c if BUILD_CS_SUPPORT pacemakerd_SOURCES += pcmkd_corosync.c diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c index 9f77ccc..365b743 100644 --- a/daemons/pacemakerd/pacemakerd.c +++ b/daemons/pacemakerd/pacemakerd.c @@ -92,7 +92,7 @@ pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **er static gboolean standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.standby = TRUE; - pcmk__set_env_option("node_start_state", "standby"); + pcmk__set_env_option(PCMK__ENV_NODE_START_STATE, "standby", false); return TRUE; } @@ -129,7 +129,7 @@ pcmk_sigquit(int nsig) } static void -mcp_chown(const char *path, uid_t uid, gid_t gid) +pacemakerd_chown(const char *path, uid_t uid, gid_t gid) { int rc = chown(path, uid, gid); @@ -166,7 +166,7 @@ create_pcmk_dirs(void) crm_warn("Could not create directory " CRM_STATE_DIR ": %s", pcmk_rc_str(errno)); } else { - mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); + pacemakerd_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); } for (int i = 0; dirs[i] != NULL; ++i) { @@ -176,7 +176,7 @@ create_pcmk_dirs(void) crm_warn("Could not create directory %s: %s", dirs[i], pcmk_rc_str(rc)); } else { - mcp_chown(dirs[i], pcmk_uid, pcmk_gid); + pacemakerd_chown(dirs[i], pcmk_uid, pcmk_gid); } } } @@ -312,7 +312,8 @@ main(int argc, char **argv) goto done; } - pcmk__set_env_option("mcp", "true"); + // @COMPAT Drop at 3.0.0; likely last used in 1.1.24 + pcmk__set_env_option(PCMK__ENV_MCP, "true", true); if (options.shutdown) { pcmk__cli_init_logging("pacemakerd", args->verbosity); @@ -330,7 +331,11 @@ main(int argc, char **argv) } pcmk_register_ipc_callback(old_instance, pacemakerd_event_cb, NULL); - rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync); + rc = pcmk__connect_ipc(old_instance, pcmk_ipc_dispatch_sync, 2); + if (rc != pcmk_rc_ok) { + crm_debug("No existing %s instance found: %s", + pcmk_ipc_name(old_instance, true), pcmk_rc_str(rc)); + } old_instance_connected = pcmk_ipc_is_connected(old_instance); if (options.shutdown) { @@ -388,7 +393,7 @@ main(int argc, char **argv) } #ifdef SUPPORT_COROSYNC - if (mcp_read_config() == FALSE) { + if (pacemakerd_read_config() == FALSE) { crm_exit(CRM_EX_UNAVAILABLE); } #endif @@ -399,7 +404,7 @@ main(int argc, char **argv) if (!pcmk__str_eq(facility, PCMK__VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches)) { - setenv("HA_LOGFACILITY", facility, 1); + pcmk__set_env_option("LOGFACILITY", facility, true); } } @@ -409,7 +414,7 @@ main(int argc, char **argv) remove_core_file_limit(); create_pcmk_dirs(); - pcmk__serve_pacemakerd_ipc(&ipcs, &mcp_ipc_callbacks); + pcmk__serve_pacemakerd_ipc(&ipcs, &pacemakerd_ipc_callbacks); #ifdef SUPPORT_COROSYNC /* Allows us to block shutdown */ @@ -420,10 +425,7 @@ main(int argc, char **argv) #endif if (pcmk__locate_sbd() > 0) { - setenv("PCMK_watchdog", "true", 1); running_with_sbd = TRUE; - } else { - setenv("PCMK_watchdog", "false", 1); } switch (find_and_track_existing_processes()) { diff --git a/daemons/pacemakerd/pacemakerd.h 
b/daemons/pacemakerd/pacemakerd.h index b2a6864..ee6facf 100644 --- a/daemons/pacemakerd/pacemakerd.h +++ b/daemons/pacemakerd/pacemakerd.h @@ -1,5 +1,5 @@ /* - * Copyright 2010-2022 the Pacemaker project contributors + * Copyright 2010-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,7 @@ #define MAX_RESPAWN 100 extern GMainLoop *mainloop; -extern struct qb_ipcs_service_handlers mcp_ipc_callbacks; +extern struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks; extern const char *pacemakerd_state; extern gboolean running_with_sbd; extern unsigned int shutdown_complete_state_reported_to; @@ -23,7 +23,7 @@ extern crm_trigger_t *shutdown_trigger; extern crm_trigger_t *startup_trigger; extern time_t subdaemon_check_progress; -gboolean mcp_read_config(void); +gboolean pacemakerd_read_config(void); gboolean cluster_connect_cfg(void); void cluster_disconnect_cfg(void); diff --git a/daemons/pacemakerd/pcmkd_corosync.c b/daemons/pacemakerd/pcmkd_corosync.c index 2648756..8a1a867 100644 --- a/daemons/pacemakerd/pcmkd_corosync.c +++ b/daemons/pacemakerd/pcmkd_corosync.c @@ -1,5 +1,5 @@ /* - * Copyright 2010-2022 the Pacemaker project contributors + * Copyright 2010-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -82,7 +82,7 @@ cluster_reconnect_cb(gpointer data) mainloop_timer_del(reconnect_timer); reconnect_timer = NULL; crm_notice("Cluster reconnect succeeded"); - mcp_read_config(); + pacemakerd_read_config(); restart_cluster_subdaemons(); return G_SOURCE_REMOVE; } else { @@ -260,7 +260,7 @@ get_config_opt(uint64_t unused, cmap_handle_t object_handle, const char *key, ch } gboolean -mcp_read_config(void) +pacemakerd_read_config(void) { cs_error_t rc = CS_OK; int retries = 0; @@ -327,8 +327,10 @@ mcp_read_config(void) crm_info("Reading configuration for %s stack", name_for_cluster_type(stack)); - pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, "corosync"); - pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, "corosync"); + pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, "corosync", true); + + // @COMPAT Drop at 3.0.0; added unused in 1.1.9 + pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, "corosync", true); // If debug logging is not configured, check whether corosync has it if (pcmk__env_option(PCMK__ENV_DEBUG) == NULL) { @@ -337,13 +339,13 @@ mcp_read_config(void) get_config_opt(config, local_handle, "logging.debug", &debug_enabled, "off"); if (crm_is_true(debug_enabled)) { - pcmk__set_env_option(PCMK__ENV_DEBUG, "1"); + pcmk__set_env_option(PCMK__ENV_DEBUG, "1", true); if (get_crm_log_level() < LOG_DEBUG) { set_crm_log_level(LOG_DEBUG); } } else { - pcmk__set_env_option(PCMK__ENV_DEBUG, "0"); + pcmk__set_env_option(PCMK__ENV_DEBUG, "0", true); } free(debug_enabled); diff --git a/daemons/pacemakerd/pcmkd_messages.c b/daemons/pacemakerd/pcmkd_messages.c index 7ed9899..4e6f822 100644 --- a/daemons/pacemakerd/pcmkd_messages.c +++ b/daemons/pacemakerd/pcmkd_messages.c @@ -269,7 +269,7 @@ pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) return 0; } -struct qb_ipcs_service_handlers mcp_ipc_callbacks = { +struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = NULL, .msg_process = pcmk_ipc_dispatch, diff --git a/daemons/pacemakerd/pcmkd_subdaemons.c b/daemons/pacemakerd/pcmkd_subdaemons.c index 3b08ecc..21e432e 100644 --- a/daemons/pacemakerd/pcmkd_subdaemons.c +++ 
b/daemons/pacemakerd/pcmkd_subdaemons.c @@ -1,5 +1,5 @@ /* - * Copyright 2010-2022 the Pacemaker project contributors + * Copyright 2010-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -307,7 +307,7 @@ pcmk_process_exit(pcmk_child_t * child) } else if (!child->respawn) { /* nothing to do */ - } else if (crm_is_true(getenv("PCMK_fail_fast"))) { + } else if (crm_is_true(pcmk__env_option(PCMK__ENV_FAIL_FAST))) { crm_err("Rebooting system because of %s", child->name); pcmk__panic(__func__); @@ -353,8 +353,8 @@ pcmk_shutdown_worker(gpointer user_data) " if it vitally depends on some other daemons" " going down in a controlled way already," " or locate and kill the correct %s process" - " on your own; set PCMK_fail_fast=1 to avoid" - " this altogether next time around", + " on your own; set PCMK_" PCMK__ENV_FAIL_FAST "=1" + " to avoid this altogether next time around", child->name, (long) SHUTDOWN_ESCALATION_PERIOD, child->command); } @@ -389,6 +389,7 @@ pcmk_shutdown_worker(gpointer user_data) return TRUE; } + // @COMPAT Drop shutdown delay at 3.0.0 { const char *delay = pcmk__env_option(PCMK__ENV_SHUTDOWN_DELAY); if(delay) { @@ -423,8 +424,8 @@ start_child(pcmk_child_t * child) gid_t gid = 0; gboolean use_valgrind = FALSE; gboolean use_callgrind = FALSE; - const char *env_valgrind = getenv("PCMK_valgrind_enabled"); - const char *env_callgrind = getenv("PCMK_callgrind_enabled"); + const char *env_valgrind = pcmk__env_option(PCMK__ENV_VALGRIND_ENABLED); + const char *env_callgrind = pcmk__env_option(PCMK__ENV_CALLGRIND_ENABLED); child->active_before_startup = false; child->check_count = 0; @@ -712,14 +713,16 @@ find_and_track_existing_processes(void) continue; } + // @TODO Functionize more of this to reduce nesting pcmk_children[i].respawn_count = rounds; switch (rc) { case pcmk_rc_ok: if (pcmk_children[i].pid == PCMK__SPECIAL_PID) { - if (crm_is_true(getenv("PCMK_fail_fast"))) { + if (crm_is_true(pcmk__env_option(PCMK__ENV_FAIL_FAST))) { crm_crit("Cannot reliably track pre-existing" " authentic process behind %s IPC on this" - " platform and PCMK_fail_fast requested", + " platform and PCMK_" PCMK__ENV_FAIL_FAST + " requested", pcmk_children[i].endpoint); return EOPNOTSUPP; } else if (pcmk_children[i].respawn_count == WAIT_TRIES) { @@ -727,9 +730,9 @@ find_and_track_existing_processes(void) " on this platform untrackable, process" " behind %s IPC is stable (was in %d" " previous samples) so rather than" - " bailing out (PCMK_fail_fast not" - " requested), we just switch to a less" - " optimal IPC liveness monitoring" + " bailing out (PCMK_" PCMK__ENV_FAIL_FAST + " not requested), we just switch to a" + " less optimal IPC liveness monitoring" " (not very suitable for heavy load)", pcmk_children[i].name, WAIT_TRIES - 1); crm_warn("The process behind %s IPC cannot be" @@ -822,7 +825,7 @@ init_children_processes(void *user_data) * * This may be useful for the daemons to know */ - setenv("PCMK_respawned", "true", 1); + pcmk__set_env_option(PCMK__ENV_RESPAWNED, "true", false); pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_RUNNING; return TRUE; } diff --git a/daemons/schedulerd/Makefile.am b/daemons/schedulerd/Makefile.am index 57e819b..fab8e1a 100644 --- a/daemons/schedulerd/Makefile.am +++ b/daemons/schedulerd/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2021 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -10,7 +10,8 @@ include $(top_srcdir)/mk/common.mk include $(top_srcdir)/mk/man.mk -AM_CPPFLAGS += -I$(top_builddir) -I$(top_srcdir) +AM_CPPFLAGS += -I$(top_builddir) \ + -I$(top_srcdir) halibdir = $(CRM_DAEMON_DIR) @@ -26,27 +27,34 @@ endif noinst_HEADERS = pacemaker-schedulerd.h -pacemaker_schedulerd_CFLAGS = $(CFLAGS_HARDENED_EXE) +pacemaker_schedulerd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_schedulerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) -pacemaker_schedulerd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la +pacemaker_schedulerd_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +pacemaker_schedulerd_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +pacemaker_schedulerd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la + # libcib for get_object_root() pacemaker_schedulerd_SOURCES = pacemaker-schedulerd.c pacemaker_schedulerd_SOURCES += schedulerd_messages.c +.PHONY: install-exec-local install-exec-local: $(INSTALL) -d -m 750 $(DESTDIR)/$(PE_STATE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(PE_STATE_DIR) -if BUILD_LEGACY_LINKS +.PHONY: install-exec-hook install-exec-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f pengine && $(LN_S) pacemaker-schedulerd pengine +endif +.PHONY: uninstall-hook uninstall-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f pengine endif +.PHONY: uninstall-local uninstall-local: -rmdir $(DESTDIR)/$(PE_STATE_DIR) diff --git a/daemons/schedulerd/pacemaker-schedulerd.h b/daemons/schedulerd/pacemaker-schedulerd.h index cbb07e1..75b7d38 100644 --- a/daemons/schedulerd/pacemaker-schedulerd.h +++ b/daemons/schedulerd/pacemaker-schedulerd.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,7 +11,7 @@ #define PCMK__PACEMAKER_SCHEDULERD__H #include -#include +#include extern pcmk__output_t *logger_out; extern pcmk__output_t *out; diff --git a/daemons/schedulerd/schedulerd_messages.c b/daemons/schedulerd/schedulerd_messages.c index 1c124d2..5a97365 100644 --- a/daemons/schedulerd/schedulerd_messages.c +++ b/daemons/schedulerd/schedulerd_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -22,12 +22,12 @@ static GHashTable *schedulerd_handlers = NULL; -static pe_working_set_t * +static pcmk_scheduler_t * init_working_set(void) { - pe_working_set_t *data_set = pe_new_working_set(); + pcmk_scheduler_t *scheduler = pe_new_working_set(); - CRM_ASSERT(data_set != NULL); + CRM_ASSERT(scheduler != NULL); crm_config_error = FALSE; crm_config_warning = FALSE; @@ -35,8 +35,8 @@ init_working_set(void) was_processing_error = FALSE; was_processing_warning = FALSE; - data_set->priv = logger_out; - return data_set; + scheduler->priv = logger_out; + return scheduler; } static xmlNode * @@ -72,7 +72,7 @@ handle_pecalc_request(pcmk__request_t *request) xmlNode *reply = NULL; bool is_repoke = false; bool process = true; - pe_working_set_t *data_set = init_working_set(); + pcmk_scheduler_t *scheduler = init_working_set(); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, "ack", NULL, CRM_EX_INDETERMINATE); @@ -81,9 +81,9 @@ handle_pecalc_request(pcmk__request_t *request) CRM_FEATURE_SET); converted = copy_xml(xml_data); if (!cli_config_update(&converted, NULL, TRUE)) { - data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); - crm_xml_add_int(data_set->graph, "transition_id", 0); - crm_xml_add_int(data_set->graph, "cluster-delay", 0); + scheduler->graph = create_xml_node(NULL, XML_TAG_GRAPH); + crm_xml_add_int(scheduler->graph, "transition_id", 0); + crm_xml_add_int(scheduler->graph, "cluster-delay", 0); process = false; free(digest); @@ -98,9 +98,9 @@ handle_pecalc_request(pcmk__request_t *request) if (process) { pcmk__schedule_actions(converted, - pe_flag_no_counts - |pe_flag_no_compat - |pe_flag_show_utilization, data_set); + pcmk_sched_no_counts + |pcmk_sched_no_compat + |pcmk_sched_show_utilization, scheduler); } // Get appropriate index into series[] array @@ -112,7 +112,7 @@ handle_pecalc_request(pcmk__request_t *request) series_id = 2; } - value = pe_pref(data_set->config_hash, series[series_id].param); + value = pe_pref(scheduler->config_hash, series[series_id].param); if ((value == NULL) || (pcmk__scan_min_int(value, &series_wrap, -1) != pcmk_rc_ok)) { series_wrap = series[series_id].wrap; @@ -126,8 +126,8 @@ handle_pecalc_request(pcmk__request_t *request) crm_trace("Series %s: wrap=%d, seq=%u, pref=%s", series[series_id].name, series_wrap, seq, value); - data_set->input = NULL; - reply = create_reply(msg, data_set->graph); + scheduler->input = NULL; + reply = create_reply(msg, scheduler->graph); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, @@ -172,7 +172,7 @@ handle_pecalc_request(pcmk__request_t *request) done: free_xml(converted); - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return reply; } diff --git a/devel/Makefile.am b/devel/Makefile.am index 94581e1..b50f097 100644 --- a/devel/Makefile.am +++ b/devel/Makefile.am @@ -35,10 +35,12 @@ COCCI_FILES ?= coccinelle/string-any-of.cocci \ dist_noinst_SCRIPTS = coccinelle/test/testrunner.sh -EXTRA_DIST = README gdbhelpers $(COCCI_FILES) \ - coccinelle/ref-passed-variables-inited.cocci \ - coccinelle/rename-fn.cocci \ - coccinelle/test/ref-passed-variables-inited.input.c \ +EXTRA_DIST = README \ + gdbhelpers \ + $(COCCI_FILES) \ + coccinelle/ref-passed-variables-inited.cocci \ + coccinelle/rename-fn.cocci \ + coccinelle/test/ref-passed-variables-inited.input.c \ coccinelle/test/ref-passed-variables-inited.output # Any file in this list is allowed to use any of the pcmk__ internal functions. 
@@ -51,6 +53,7 @@ MAY_USE_INTERNAL_FILES = $(shell find .. -path "../lib/*.c" -o -path "../lib/*pr # may be applied. OTHER_FILES = $(shell find ../include -name '*h' -a \! -name '*internal.h' -a \! -path '../include/pcmki/*') +.PHONY: cocci cocci: -for cf in $(COCCI_FILES); do \ for f in $(MAY_USE_INTERNAL_FILES); do \ @@ -61,9 +64,11 @@ cocci: done ; \ done +.PHONY: cocci-inplace cocci-inplace: $(MAKE) $(AM_MAKEFLAGS) _SPATCH_FLAGS=--in-place cocci +.PHONY: cocci-test cocci-test: for f in coccinelle/test/*.c; do \ coccinelle/test/testrunner.sh $$f; \ @@ -78,6 +83,7 @@ cocci-test: # See scan-build(1) for possible checkers (leave empty to use default set) CLANG_checkers ?= +.PHONY: clang clang: OUT=$$(cd $(top_builddir) \ && scan-build $(CLANG_checkers:%=-enable-checker %) \ @@ -158,6 +164,8 @@ coverity-clean: ## cppcheck +GLIB_CFLAGS ?= $(pkg-config --cflags glib-2.0) + # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix @@ -167,6 +175,7 @@ CPPCHECK_ARGS ?= CPPCHECK_DIRS = replace lib daemons tools CPPCHECK_OUT = $(abs_top_builddir)/cppcheck.out +.PHONY: cppcheck cppcheck: cppcheck $(CPPCHECK_ARGS) -I $(top_srcdir)/include \ --output-file=$(CPPCHECK_OUT) \ @@ -191,21 +200,26 @@ COVERAGE_DIR = $(top_builddir)/coverage .PHONY: coverage coverage: coverage-partial-clean cd $(top_builddir) \ - && $(MAKE) $(AM_MAKEFLAGS) core \ + && $(MAKE) $(AM_MAKEFLAGS) \ && lcov --no-external --exclude='*_test.c' -c -i -d . \ -o pacemaker_base.info \ && $(MAKE) $(AM_MAKEFLAGS) check \ && lcov --no-external --exclude='*_test.c' -c -d . \ -o pacemaker_test.info \ && lcov -a pacemaker_base.info -a pacemaker_test.info \ - -o pacemaker_total.info - genhtml $(top_builddir)/pacemaker_total.info -o $(COVERAGE_DIR) -s + -o pacemaker_total.info \ + && lcov --remove pacemaker_total.info -o pacemaker_filtered.info\ + "$(abs_top_builddir)/tools/*" \ + "$(abs_top_builddir)/daemons/*/*" \ + "$(abs_top_builddir)/replace/*" \ + "$(abs_top_builddir)/lib/gnu/*" + genhtml $(top_builddir)/pacemaker_filtered.info -o $(COVERAGE_DIR) -s -t "Pacemaker code coverage" # Check coverage of CLI regression tests .PHONY: coverage-cts coverage-cts: coverage-partial-clean cd $(top_builddir) \ - && $(MAKE) $(AM_MAKEFLAGS) core \ + && $(MAKE) $(AM_MAKEFLAGS) \ && lcov --no-external -c -i -d tools -o pacemaker_base.info \ && cts/cts-cli \ && lcov --no-external -c -d tools -o pacemaker_test.info \ @@ -277,12 +291,37 @@ INDENT_PACEMAKER_STYLE = --blank-lines-after-declarations \ --swallow-optional-blank-lines \ --tab-size8 +.PHONY: indent indent: VERSION_CONTROL=none \ find $(INDENT_DIRS) -type f -name "*.[ch]" \ $(INDENT_IGNORE_PATHS:%= ! -path '%') \ -exec indent $(INDENT_PACEMAKER_STYLE) $(INDENT_OPTS) \{\} \; +# +# Check whether copyrights have been updated appropriately +# (Set COMMIT to desired commit or commit range to check, defaulting to HEAD, +# or set it empty to check uncommitted changes) +# +YEAR = $(shell date +%Y) +MODIFIED_FILES = $(shell case "$(COMMIT)" in \ + [0-9a-f]*$(rparen) \ + git diff-tree --no-commit-id \ + --name-only "$(COMMIT)" -r ;; \ + *$(rparen) \ + cd "$(top_srcdir)"; \ + git ls-files --modified ;; \ + esac) + +.PHONY: copyright +copyright: + @cd "$(top_srcdir)" && for file in $(MODIFIED_FILES); do \ + if ! 
grep 'opyright .*$(YEAR).* Pacemaker' "$$file" \ + >/dev/null 2>&1; then \ + echo "$$file"; \ + fi; \ + done + # # Scratch file for ad-hoc testing # @@ -291,5 +330,6 @@ EXTRA_PROGRAMS = scratch nodist_scratch_SOURCES = scratch.c scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la +.PHONY: clean-local clean-local: coverage-clean coverity-clean cppcheck-clean -rm -f $(EXTRA_PROGRAMS) diff --git a/doc/Makefile.am b/doc/Makefile.am index 1400145..a40ddfe 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2003-2021 the Pacemaker project contributors +# Copyright 2003-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -13,7 +13,10 @@ include $(top_srcdir)/mk/release.mk # What formats to use for book uploads (i.e. "make www"; # use BOOK_FORMATS in sphinx subdirectory to change local builds) -BOOK_FORMATS ?= html singlehtml pdf epub +BOOK_FORMATS ?= html \ + singlehtml \ + pdf \ + epub # SNMP MIB mibdir = $(datadir)/snmp/mibs @@ -25,7 +28,8 @@ DEPRECATED_GENERATED = if BUILD_ASCIIDOC DEPRECATED_GENERATED += $(DEPRECATED_ORIGINAL:%.txt=%.html) endif -DEPRECATED_ALL = $(DEPRECATED_ORIGINAL) $(DEPRECATED_GENERATED) +DEPRECATED_ALL = $(DEPRECATED_ORIGINAL) \ + $(DEPRECATED_GENERATED) doc_DATA = $(DEPRECATED_ALL) noinst_SCRIPTS = abi-check @@ -73,14 +77,17 @@ deprecated-clean: # Annotated source code as HTML # Cleaning first ensures we don't index unrelated stuff like RPM sources +.PHONY: global global: $(MAKE) $(AM_MAKEFLAGS) -C .. clean-generic $(MAKE) $(AM_MAKEFLAGS) -C ../rpm rpm-clean cd .. && gtags -q && htags -sanhIT doc +.PHONY: global-upload global-upload: global rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/" +.PHONY: global-clean global-clean: -rm -rf HTML @@ -93,43 +100,53 @@ global-clean: %.7.html: %.7 groff -mandoc `man -w ./$<` -T html > $@ +.PHONY: manhtml manhtml: $(MAKE) $(AM_MAKEFLAGS) -C .. all find .. -name "[a-z]*.[78]" -exec $(MAKE) $(AM_MAKEFLAGS) \{\}.html \; +.PHONY: manhtml-upload manhtml-upload: manhtml find .. -name "[a-z]*.[78].html" -exec \ rsync $(RSYNC_OPTS) \{\} "$(RSYNC_DEST)/$(PACKAGE)/man/" \; +.PHONY: manhtml-clean manhtml-clean: -find .. -name "[a-z]*.[78].html" -exec rm \{\} \; # API documentation as HTML +.PHONY: doxygen doxygen: Doxyfile doxygen Doxyfile +.PHONY: doxygen-upload doxygen-upload: doxygen rsync $(RSYNC_OPTS) api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/" +.PHONY: doxygen-clean doxygen-clean: -rm -rf api # ABI compatibility report as HTML +.PHONY: abi abi: abi-check ./abi-check $(PACKAGE) $(LAST_RELEASE) $(TAG) +.PHONY: abi-www abi-www: export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u $(PACKAGE) $(LAST_RELEASE) $(TAG) +.PHONY: abi-clean abi-clean: -rm -rf abi_dumps compat_reports # The main documentation books (which are actually in the sphinx subdirectory) +.PHONY: books-upload books-upload: $(MAKE) $(AM_MAKEFLAGS) -C sphinx clean $(MAKE) $(AM_MAKEFLAGS) -C sphinx \ @@ -142,11 +159,13 @@ books-upload: .PHONY: www www: clean-local deprecated-upload manhtml-upload global-upload doxygen-upload books-upload +.PHONY: clean-local clean-local: global-clean manhtml-clean doxygen-clean abi-clean deprecated-clean # "make check" will cause "make all" to be run, which means docs will get built # as a part of running tests if they haven't already. That seems unnecessary, so # override the default check-recursive rule with this one that just returns. If # we ever need to add tests to this directory, this rule will have to come out. 
+.PHONY: check-recursive check-recursive: @true diff --git a/doc/abi-check.in b/doc/abi-check.in index 5a5e253..6b6a8d3 100755 --- a/doc/abi-check.in +++ b/doc/abi-check.in @@ -1,6 +1,6 @@ #!@BASH_PATH@ # -# Copyright 2011-2022 the Pacemaker project contributors +# Copyright 2011-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -29,6 +29,12 @@ tag() { fi } +sed_in_place() { + cp -p "$1" "$1.$$" + sed -e "$2" "$1" > "$1.$$" + mv "$1.$$" "$1" +} + # Strip anything up to and including a dash from the argument version() { echo "$1" | sed s:.*-:: @@ -103,7 +109,7 @@ extract_one() { # Remove "doc" from SUBDIRS in Makefile (but why?) BUILD_ROOT="$(pwd)/$BUILD_ROOT" - sed -i.sed 's: doc::' "$BUILD_ROOT/Makefile.am" + sed_in_place "$BUILD_ROOT/Makefile.am" 's: doc::' # Run ABI dump abi_config "$PACKAGE" "$VERSION" "$BUILD_ROOT" "$DESC" diff --git a/doc/sphinx/Clusters_from_Scratch/apache.rst b/doc/sphinx/Clusters_from_Scratch/apache.rst index e4eddff..c5c155e 100644 --- a/doc/sphinx/Clusters_from_Scratch/apache.rst +++ b/doc/sphinx/Clusters_from_Scratch/apache.rst @@ -316,7 +316,7 @@ have to worry about whether you can handle the load after a failover. To do this, we create a location constraint. In the location constraint below, we are saying the ``WebSite`` resource -prefers the node ``pcmk-1`` with a score of ``50``. Here, the score indicates +prefers the node ``pcmk-2`` with a score of ``50``. Here, the score indicates how strongly we'd like the resource to run at this location. .. code-block:: console diff --git a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst index 0a7a7a5..437b5f8 100644 --- a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst +++ b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst @@ -114,14 +114,14 @@ Start and enable the daemon by issuing the following commands on each node: # systemctl enable pcsd.service Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service. -The installed packages will create an ``hacluster`` user with a disabled password. -While this is fine for running ``pcs`` commands locally, +The installed packages will create an |CRM_DAEMON_USER| user with a disabled +password. While this is fine for running ``pcs`` commands locally, the account needs a login password in order to perform such tasks as syncing the Corosync configuration, or starting and stopping the cluster on other nodes. This tutorial will make use of such commands, -so now we will set a password for the ``hacluster`` user, using the same password -on both nodes: +so now we will set a password for the |CRM_DAEMON_USER| user, using the same +password on both nodes: .. 
code-block:: console diff --git a/doc/sphinx/Makefile.am b/doc/sphinx/Makefile.am index c4ade5c..e48e19a 100644 --- a/doc/sphinx/Makefile.am +++ b/doc/sphinx/Makefile.am @@ -55,7 +55,8 @@ DOTS = $(wildcard shared/images/*.dot) # Vector sources for generated PNGs (including SVG equivalents of DOTS, created # manually using dot) -SVGS = $(wildcard shared/images/pcmk-*.svg) $(DOTS:%.dot=%.svg) +SVGS = $(wildcard shared/images/pcmk-*.svg) \ + $(DOTS:%.dot=%.svg) # PNG images generated from SVGS # @@ -71,28 +72,33 @@ PNGS_Pacemaker_Remote = $(wildcard Pacemaker_Remote/images/*.png) STATIC_FILES = $(wildcard _static/*.css) -EXTRA_DIST = $(wildcard */*.rst) $(DOTS) $(SVGS) \ - $(PNGS_Clusters_from_Scratch) \ - $(PNGS_Pacemaker_Explained) \ - $(PNGS_Pacemaker_Remote) \ - $(wildcard Pacemaker_Python_API/_templates/*rst) \ - $(STATIC_FILES) \ +EXTRA_DIST = $(wildcard */*.rst) $(DOTS) $(SVGS) \ + $(PNGS_Clusters_from_Scratch) \ + $(PNGS_Pacemaker_Explained) \ + $(PNGS_Pacemaker_Remote) \ + $(wildcard Pacemaker_Python_API/_templates/*rst) \ + $(STATIC_FILES) \ conf.py.in # recursive, preserve symlinks/permissions/times, verbose, compress, # don't cross filesystems, sparse, show progress RSYNC_OPTS = -rlptvzxS --progress +PACKAGE_SERIES=$(shell echo "$VERSION" | awk -F. '{ print $1"."$2 }') + BOOK_RSYNC_DEST = $(RSYNC_DEST)/$(PACKAGE)/doc/$(PACKAGE_SERIES) BOOK = none -DEPS_intro = shared/pacemaker-intro.rst $(PNGS_GENERATED) +DEPS_intro = shared/pacemaker-intro.rst \ + $(PNGS_GENERATED) -DEPS_Clusters_from_Scratch = $(DEPS_intro) $(PNGS_Clusters_from_Scratch) +DEPS_Clusters_from_Scratch = $(DEPS_intro) \ + $(PNGS_Clusters_from_Scratch) DEPS_Pacemaker_Administration = $(DEPS_intro) DEPS_Pacemaker_Development = -DEPS_Pacemaker_Explained = $(DEPS_intro) $(PNGS_Pacemaker_Explained) +DEPS_Pacemaker_Explained = $(DEPS_intro) \ + $(PNGS_Pacemaker_Explained) DEPS_Pacemaker_Python_API = ../../python DEPS_Pacemaker_Remote = $(PNGS_Pacemaker_Remote) @@ -120,6 +126,14 @@ $(BOOKS:%=%/conf.py): conf.py.in -e 's/%BOOK_TITLE%/$(subst _, ,$(@:%/conf.py=%))/g' \ -e 's#%SRC_DIR%#$(abs_srcdir)#g' \ -e 's#%ABS_TOP_SRCDIR%#$(abs_top_srcdir)#g' \ + -e 's#%CONFIGDIR%#@CONFIGDIR@#g' \ + -e 's#%CRM_BLACKBOX_DIR%#@CRM_BLACKBOX_DIR@#g' \ + -e 's#%CRM_DAEMON_GROUP%#@CRM_DAEMON_GROUP@#g' \ + -e 's#%CRM_DAEMON_USER%#@CRM_DAEMON_USER@#g' \ + -e 's#%CRM_LOG_DIR%#@CRM_LOG_DIR@#g' \ + -e 's#%CRM_SCHEMA_DIRECTORY%#@CRM_SCHEMA_DIRECTORY@#g' \ + -e 's#%PACEMAKER_CONFIG_DIR%#@PACEMAKER_CONFIG_DIR@#g' \ + -e 's#%PCMK_GNUTLS_PRIORITIES%#@PCMK_GNUTLS_PRIORITIES@#g' \ $(<) > "$@" $(BOOK)/_build: $(STATIC_FILES) $(BOOK)/conf.py $(DEPS_$(BOOK)) $(wildcard $(srcdir)/$(BOOK)/*.rst) @@ -160,15 +174,21 @@ if BUILD_SPHINX_DOCS done @rsync $(RSYNC_OPTS) "$(builddir)/build-$(PACKAGE_SERIES).txt" \ "$(RSYNC_DEST)/$(PACKAGE)/doc" +endif +.PHONY: all-local all-local: +if BUILD_SPHINX_DOCS @for book in $(BOOKS); do \ $(MAKE) $(AM_MAKEFLAGS) BOOK=$$book \ PAPER="$(PAPER)" SPHINXFLAGS="$(SPHINXFLAGS)" \ BOOK_FORMATS="$(BOOK_FORMATS)" $$book/_build; \ done +endif +.PHONY: install-data-local install-data-local: all-local +if BUILD_SPHINX_DOCS $(AM_V_at)for book in $(BOOKS); do \ for format in $(BOOK_FORMATS); do \ formatdir="$$book/_build/$$format"; \ @@ -183,13 +203,17 @@ install-data-local: all-local done; \ done; \ done +endif +.PHONY: uninstall-local uninstall-local: +if BUILD_SPHINX_DOCS $(AM_V_at)for book in $(BOOKS); do \ rm -rf "$(DESTDIR)/$(docdir)/$$book"; \ done endif +.PHONY: clean-local clean-local: $(AM_V_at)-rm -rf \ 
$(BOOKS:%="$(builddir)/%/_build") \ diff --git a/doc/sphinx/Pacemaker_Administration/administrative.rst b/doc/sphinx/Pacemaker_Administration/administrative.rst new file mode 100644 index 0000000..7c8b346 --- /dev/null +++ b/doc/sphinx/Pacemaker_Administration/administrative.rst @@ -0,0 +1,150 @@ +.. index:: + single: administrative mode + +Administrative Modes +-------------------- + +Intrusive administration can be performed on a Pacemaker cluster without +causing resource failures, recovery, and fencing, by putting the cluster or a +subset of it into an administrative mode. + +Pacemaker supports several administrative modes: + +* Maintenance mode for the entire cluster, specific nodes, or specific + resources +* Unmanaged resources +* Disabled configuration items +* Standby mode for specific nodes + +Rules may be used to automatically set any of these modes for specific times or +other conditions. + + +.. index:: + pair: administrative mode; maintenance mode + +.. _maintenance_mode: + +Maintenance Mode +################ + +In maintenance mode, the cluster will not start or stop resources. Recurring +monitors for affected resources will be paused, except those specifying +``role`` as ``Stopped``. + +To put a specific resource into maintenance mode, set the resource's +``maintenance`` meta-attribute to ``true``. + +To put all active resources on a specific node into maintenance mode, set the +node's ``maintenance`` node attribute to ``true``. When enabled, this overrides +resource-specific maintenance mode. + +.. warning:: + + Restarting Pacemaker on a node that is in single-node maintenance mode will + likely lead to undesirable effects. If ``maintenance`` is set as a transient + attribute, it will be erased when Pacemaker is stopped, which will + immediately take the node out of maintenance mode and likely get it fenced. + If set as a permanent attribute, any resources active on the node will have + their local history erased when Pacemaker is restarted, so the cluster will + no longer consider them running on the node and thus will consider them + managed again, allowing them to be started elsewhere. + +To put all resources in the cluster into maintenance mode, set the +``maintenance-mode`` cluster option to ``true``. When enabled, this overrides +node- or resource- specific maintenance mode. + +Maintenance mode, at any level, overrides other administrative modes. + + +.. index:: + pair: administrative mode; unmanaged resources + +.. _unmanaged_resources: + +Unmanaged Resources +################### + +An unmanaged resource will not be started or stopped by the cluster. A resource +may become unmanaged in several ways: + +* The administrator may set the ``is-managed`` resource meta-attribute to + ``false`` (whether for a specific resource, or all resources without an + explicit setting via ``rsc_defaults``) +* :ref:`Maintenance mode ` causes affected resources to + become unmanaged (and overrides any ``is-managed`` setting) +* Certain types of failure cause affected resources to become unmanaged. 
These + include: + + * Failed stop operations when the ``stonith-enabled`` cluster property is set + to ``false`` + * Failure of an operation that has ``on-fail`` set to ``block`` + * A resource detected as incorrectly active on more than one node when its + ``multiple-active`` meta-attribute is set to ``block`` + * A resource constrained by a revoked ``rsc_ticket`` with ``loss-policy`` set + to ``freeze`` + * Resources with ``requires`` set (or defaulting) to anything other than + ``nothing`` in a partition that loses quorum when the ``no-quorum-policy`` + cluster option is set to ``freeze`` + +Recurring actions are not affected by unmanaging a resource. + +.. warning:: + + Manually starting an unmanaged resource on a different node is strongly + discouraged. It will at least cause the cluster to consider the resource + failed, and may require the resource's ``target-role`` to be set to + ``Stopped`` then ``Started`` in order for recovery to succeed. + + +.. index:: + pair: administrative mode; disabled configuration + +.. _disabled_configuration: + +Disabled Configuration +###################### + +Some configuration elements disable particular behaviors: + +* The ``stonith-enabled`` cluster option, when set to ``false``, disables node + fencing. This is highly discouraged, as it can lead to data unavailability, + loss, or corruption. + +* The ``stop-all-resources`` cluster option, when set to ``true``, causes all + resources to be stopped. + +* Certain elements support an ``enabled`` meta-attribute, which if set to + ``false``, causes the cluster to act as if the specific element is not + configured. These include ``op``, ``alert`` *(since 2.1.6)*, and + ``recipient`` *(since 2.1.6)*. ``enabled`` may be set for specific ``op`` + elements, or all operations without an explicit setting via ``op_defaults``. + + +.. index:: + pair: administrative mode; standby + +.. _standby: + +Standby Mode +############ + +When a node is put into standby, all resources will be moved away from the +node, and all recurring operations will be stopped on the node, except those +specifying ``role`` as ``Stopped`` (which will be newly initiated if +appropriate). + +A node may be put into standby mode by setting its ``standby`` node attribute +to ``true``. The attribute may be queried and set using the ``crm_standby`` +tool. + + +.. index:: + pair: administrative mode; rules + +Rules +##### + +Rules may be used to set administrative mode options automatically according to +various criteria such as date and time. See the "Rules" chapter of the +*Pacemaker Explained* document for details. diff --git a/doc/sphinx/Pacemaker_Administration/alerts.rst b/doc/sphinx/Pacemaker_Administration/alerts.rst index c0f54c6..42efc8d 100644 --- a/doc/sphinx/Pacemaker_Administration/alerts.rst +++ b/doc/sphinx/Pacemaker_Administration/alerts.rst @@ -287,7 +287,7 @@ Special concerns when writing alert agents: this into consideration, for example by queueing resource-intensive actions into some other instance, instead of directly executing them. -* Alert agents are run as the ``hacluster`` user, which has a minimal set +* Alert agents are run as the |CRM_DAEMON_USER| user, which has a minimal set of permissions. If an agent requires additional privileges, it is recommended to configure ``sudo`` to allow the agent to run the necessary commands as another user with the appropriate privileges. 
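For orientation, here is a minimal, hand-written sketch of how an alert agent is wired into the CIB, showing where the value that agents later see as ``CRM_alert_recipient`` comes from. The ``id`` values, script path, and log file name are purely illustrative and are not taken from this patch:

.. code-block:: xml

   <alerts>
      <alert id="alert-sample" path="/path/to/my-alert-agent.sh">
         <recipient id="alert-sample-log" value="/var/log/cluster-alerts.log"/>
      </alert>
   </alerts>

With a configuration along these lines, the agent would receive ``/var/log/cluster-alerts.log`` in ``CRM_alert_recipient`` each time it is invoked, which is one reason the input-validation concerns discussed for alert agents apply to that variable.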
@@ -297,7 +297,7 @@ Special concerns when writing alert agents: user-configured ``timestamp-format``), ``CRM_alert_recipient,`` and all instance attributes. Mostly this is needed simply to protect against configuration errors, but if some user can modify the CIB without having - ``hacluster``-level access to the cluster nodes, it is a potential security + |CRM_DAEMON_USER| access to the cluster nodes, it is a potential security concern as well, to avoid the possibility of code injection. .. note:: **ocf:pacemaker:ClusterMon compatibility** @@ -308,4 +308,4 @@ Special concerns when writing alert agents: passed to alert agents are available prepended with ``CRM_notify_`` as well as ``CRM_alert_``. One break in compatibility is that ``ClusterMon`` ran external scripts as the ``root`` user, while alert agents are run as the - ``hacluster`` user. + |CRM_DAEMON_USER| user. diff --git a/doc/sphinx/Pacemaker_Administration/configuring.rst b/doc/sphinx/Pacemaker_Administration/configuring.rst index 415dd81..295c96a 100644 --- a/doc/sphinx/Pacemaker_Administration/configuring.rst +++ b/doc/sphinx/Pacemaker_Administration/configuring.rst @@ -189,48 +189,53 @@ cluster even if the machine itself is not in the same cluster. To do this, one simply sets up a number of environment variables and runs the same commands as when working on a cluster node. -.. table:: **Environment Variables Used to Connect to Remote Instances of the CIB** - - +----------------------+-----------+------------------------------------------------+ - | Environment Variable | Default | Description | - +======================+===========+================================================+ - | CIB_user | $USER | .. index:: | - | | | single: CIB_user | - | | | single: environment variable; CIB_user | - | | | | - | | | The user to connect as. Needs to be | - | | | part of the ``haclient`` group on | - | | | the target host. | - +----------------------+-----------+------------------------------------------------+ - | CIB_passwd | | .. index:: | - | | | single: CIB_passwd | - | | | single: environment variable; CIB_passwd | - | | | | - | | | The user's password. Read from the | - | | | command line if unset. | - +----------------------+-----------+------------------------------------------------+ - | CIB_server | localhost | .. index:: | - | | | single: CIB_server | - | | | single: environment variable; CIB_server | - | | | | - | | | The host to contact | - +----------------------+-----------+------------------------------------------------+ - | CIB_port | | .. index:: | - | | | single: CIB_port | - | | | single: environment variable; CIB_port | - | | | | - | | | The port on which to contact the server; | - | | | required. | - +----------------------+-----------+------------------------------------------------+ - | CIB_encrypted | TRUE | .. index:: | - | | | single: CIB_encrypted | - | | | single: environment variable; CIB_encrypted | - | | | | - | | | Whether to encrypt network traffic | - +----------------------+-----------+------------------------------------------------+ +.. list-table:: **Environment Variables Used to Connect to Remote Instances of the CIB** + :class: longtable + :widths: 2 2 5 + :header-rows: 1 + + * - Environment Variable + - Default + - Description + * - .. index:: + single: CIB_user + single: environment variable; CIB_user + + CIB_user + - |CRM_DAEMON_USER_RAW| + - The user to connect as. Needs to be part of the |CRM_DAEMON_GROUP| group + on the target host. + * - .. 
index:: + single: CIB_passwd + single: environment variable; CIB_passwd + + CIB_passwd + - + - The user's password. Read from the command line if unset. + * - .. index:: + single: CIB_server + single: environment variable; CIB_server + + CIB_server + - localhost + - The host to contact + * - .. index:: + single: CIB_port + single: environment variable; CIB_port + + CIB_port + - + - The port on which to contact the server; required + * - .. index:: + single: CIB_encrypted + single: environment variable; CIB_encrypted + + CIB_encrypted + - true + - Whether to encrypt network traffic So, if **c001n01** is an active cluster node and is listening on port 1234 -for connections, and **someuser** is a member of the **haclient** group, +for connections, and **someuser** is a member of the |CRM_DAEMON_GROUP| group, then the following would prompt for **someuser**'s password and return the cluster's current configuration: @@ -243,27 +248,9 @@ For security reasons, the cluster does not listen for remote connections by default. If you wish to allow remote access, you need to set the ``remote-tls-port`` (encrypted) or ``remote-clear-port`` (unencrypted) CIB properties (i.e., those kept in the ``cib`` tag, like ``num_updates`` and -``epoch``). - -.. table:: **Extra top-level CIB properties for remote access** - - +----------------------+-----------+------------------------------------------------------+ - | CIB Property | Default | Description | - +======================+===========+======================================================+ - | remote-tls-port | | .. index:: | - | | | single: remote-tls-port | - | | | single: CIB property; remote-tls-port | - | | | | - | | | Listen for encrypted remote connections | - | | | on this port. | - +----------------------+-----------+------------------------------------------------------+ - | remote-clear-port | | .. index:: | - | | | single: remote-clear-port | - | | | single: CIB property; remote-clear-port | - | | | | - | | | Listen for plaintext remote connections | - | | | on this port. | - +----------------------+-----------+------------------------------------------------------+ +``epoch``). Encrypted communication is keyless, which makes it subject to +man-in-the-middle attacks, and thus either option should be used only on +protected networks. .. important:: diff --git a/doc/sphinx/Pacemaker_Administration/index.rst b/doc/sphinx/Pacemaker_Administration/index.rst index 327ad31..af89380 100644 --- a/doc/sphinx/Pacemaker_Administration/index.rst +++ b/doc/sphinx/Pacemaker_Administration/index.rst @@ -22,6 +22,8 @@ Table of Contents cluster configuring tools + administrative + moving troubleshooting upgrading alerts diff --git a/doc/sphinx/Pacemaker_Administration/moving.rst b/doc/sphinx/Pacemaker_Administration/moving.rst new file mode 100644 index 0000000..3d6a92a --- /dev/null +++ b/doc/sphinx/Pacemaker_Administration/moving.rst @@ -0,0 +1,305 @@ +Moving Resources +---------------- + +.. index:: + single: resource; move + +Moving Resources Manually +######################### + +There are primarily two occasions when you would want to move a resource from +its current location: when the whole node is under maintenance, and when a +single resource needs to be moved. + +.. index:: + single: standby mode + single: node; standby mode + +Standby Mode +____________ + +Since everything eventually comes down to a score, you could create constraints +for every resource to prevent them from running on one node. 
While Pacemaker +configuration can seem convoluted at times, not even we would require this of +administrators. + +Instead, you can set a special node attribute which tells the cluster "don't +let anything run here". There is even a helpful tool to help query and set it, +called ``crm_standby``. To check the standby status of the current machine, +run: + +.. code-block:: none + + # crm_standby -G + +A value of ``on`` indicates that the node is *not* able to host any resources, +while a value of ``off`` says that it *can*. + +You can also check the status of other nodes in the cluster by specifying the +`--node` option: + +.. code-block:: none + + # crm_standby -G --node sles-2 + +To change the current node's standby status, use ``-v`` instead of ``-G``: + +.. code-block:: none + + # crm_standby -v on + +Again, you can change another host's value by supplying a hostname with +``--node``. + +A cluster node in standby mode will not run resources, but still contributes to +quorum, and may fence or be fenced by nodes. + +Moving One Resource +___________________ + +When only one resource is required to move, we could do this by creating +location constraints. However, once again we provide a user-friendly shortcut +as part of the ``crm_resource`` command, which creates and modifies the extra +constraints for you. If ``Email`` were running on ``sles-1`` and you wanted it +moved to a specific location, the command would look something like: + +.. code-block:: none + + # crm_resource -M -r Email -H sles-2 + +Behind the scenes, the tool will create the following location constraint: + +.. code-block:: xml + + + +It is important to note that subsequent invocations of ``crm_resource -M`` are +not cumulative. So, if you ran these commands: + +.. code-block:: none + + # crm_resource -M -r Email -H sles-2 + # crm_resource -M -r Email -H sles-3 + +then it is as if you had never performed the first command. + +To allow the resource to move back again, use: + +.. code-block:: none + + # crm_resource -U -r Email + +Note the use of the word *allow*. The resource *can* move back to its original +location, but depending on ``resource-stickiness``, location constraints, and +so forth, it might stay where it is. + +To be absolutely certain that it moves back to ``sles-1``, move it there before +issuing the call to ``crm_resource -U``: + +.. code-block:: none + + # crm_resource -M -r Email -H sles-1 + # crm_resource -U -r Email + +Alternatively, if you only care that the resource should be moved from its +current location, try: + +.. code-block:: none + + # crm_resource -B -r Email + +which will instead create a negative constraint, like: + +.. code-block:: xml + + + +This will achieve the desired effect, but will also have long-term +consequences. As the tool will warn you, the creation of a ``-INFINITY`` +constraint will prevent the resource from running on that node until +``crm_resource -U`` is used. This includes the situation where every other +cluster node is no longer available! + +In some cases, such as when ``resource-stickiness`` is set to ``INFINITY``, it +is possible that you will end up with nodes with the same score, forcing the +cluster to choose one (which may not be the one you want). The tool can detect +some of these cases and deals with them by creating both positive and negative +constraints. For example: + +.. code-block:: xml + + + + +which has the same long-term consequences as discussed earlier. 
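To make the behavior described above concrete, the following is a rough sketch of the location constraints that ``crm_resource -M`` and ``crm_resource -B`` correspond to for the ``Email`` example. The constraint ``id`` values shown are illustrative guesses at the tool's naming, not verified output:

.. code-block:: xml

   <!-- crm_resource -M -r Email -H sles-2 : prefer sles-2 -->
   <rsc_location id="cli-prefer-Email" rsc="Email" node="sles-2" score="INFINITY"/>

   <!-- crm_resource -B -r Email (while active on sles-1) : ban sles-1 -->
   <rsc_location id="cli-ban-Email-on-sles-1" rsc="Email" node="sles-1" score="-INFINITY"/>

Removing such constraints with ``crm_resource -U`` is what allows the resource to move back (or stay put), subject to ``resource-stickiness`` and any other constraints.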
+ +Moving Resources Due to Connectivity Changes +############################################ + +You can configure the cluster to move resources when external connectivity is +lost in two steps. + +.. index:: + single: ocf:pacemaker:ping resource + single: ping resource + +Tell Pacemaker to Monitor Connectivity +______________________________________ + +First, add an ``ocf:pacemaker:ping`` resource to the cluster. The ``ping`` +resource uses the system utility of the same name to a test whether a list of +machines (specified by DNS hostname or IP address) are reachable, and uses the +results to maintain a node attribute. + +The node attribute is called ``pingd`` by default, but is customizable in order +to allow multiple ping groups to be defined. + +Normally, the ping resource should run on all cluster nodes, which means that +you'll need to create a clone. A template for this can be found below, along +with a description of the most interesting parameters. + +.. table:: **Commonly Used ocf:pacemaker:ping Resource Parameters** + :widths: 1 4 + + +--------------------+--------------------------------------------------------------+ + | Resource Parameter | Description | + +====================+==============================================================+ + | dampen | .. index:: | + | | single: ocf:pacemaker:ping resource; dampen parameter | + | | single: dampen; ocf:pacemaker:ping resource parameter | + | | | + | | The time to wait (dampening) for further changes to occur. | + | | Use this to prevent a resource from bouncing around the | + | | cluster when cluster nodes notice the loss of connectivity | + | | at slightly different times. | + +--------------------+--------------------------------------------------------------+ + | multiplier | .. index:: | + | | single: ocf:pacemaker:ping resource; multiplier parameter | + | | single: multiplier; ocf:pacemaker:ping resource parameter | + | | | + | | The number of connected ping nodes gets multiplied by this | + | | value to get a score. Useful when there are multiple ping | + | | nodes configured. | + +--------------------+--------------------------------------------------------------+ + | host_list | .. index:: | + | | single: ocf:pacemaker:ping resource; host_list parameter | + | | single: host_list; ocf:pacemaker:ping resource parameter | + | | | + | | The machines to contact in order to determine the current | + | | connectivity status. Allowed values include resolvable DNS | + | | connectivity host names, IPv4 addresses, and IPv6 addresses. | + +--------------------+--------------------------------------------------------------+ + +.. topic:: Example ping resource that checks node connectivity once every minute + + .. code-block:: xml + + + + + + + + + + + + + + +.. important:: + + You're only half done. The next section deals with telling Pacemaker how to + deal with the connectivity status that ``ocf:pacemaker:ping`` is recording. + +Tell Pacemaker How to Interpret the Connectivity Data +_____________________________________________________ + +.. important:: + + Before attempting the following, make sure you understand rules. See the + "Rules" chapter of the *Pacemaker Explained* document for details. + +There are a number of ways to use the connectivity data. + +The most common setup is for people to have a single ping target (for example, +the service network's default gateway), to prevent the cluster from running a +resource on any unconnected node. + +.. topic:: Don't run a resource on unconnected nodes + + .. 
code-block:: xml + + + + + + + +A more complex setup is to have a number of ping targets configured. You can +require the cluster to only run resources on nodes that can connect to all (or +a minimum subset) of them. + +.. topic:: Run only on nodes connected to three or more ping targets + + .. code-block:: xml + + + ... + + ... + + ... + + + + + + +Alternatively, you can tell the cluster only to *prefer* nodes with the best +connectivity, by using ``score-attribute`` in the rule. Just be sure to set +``multiplier`` to a value higher than that of ``resource-stickiness`` (and +don't set either of them to ``INFINITY``). + +.. topic:: Prefer node with most connected ping nodes + + .. code-block:: xml + + + + + + + +It is perhaps easier to think of this in terms of the simple constraints that +the cluster translates it into. For example, if ``sles-1`` is connected to all +five ping nodes but ``sles-2`` is only connected to two, then it would be as if +you instead had the following constraints in your configuration: + +.. topic:: How the cluster translates the above location constraint + + .. code-block:: xml + + + + +The advantage is that you don't have to manually update any constraints +whenever your network connectivity changes. + +You can also combine the concepts above into something even more complex. The +example below shows how you can prefer the node with the most connected ping +nodes provided they have connectivity to at least three (again assuming that +``multiplier`` is set to 1000). + +.. topic:: More complex example of choosing location based on connectivity + + .. code-block:: xml + + + + + + + + + diff --git a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst index 61ab4e6..3eda60a 100644 --- a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst +++ b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst @@ -118,14 +118,11 @@ Manage Resources .. topic:: Create a Resource .. code-block:: none - - crmsh # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \ - params ip=192.168.122.120 cidr_netmask=24 \ - op monitor interval=30s + crmsh # crm configure primitive ClusterIP IPaddr2 params ip=192.168.122.120 cidr_netmask=24 pcs # pcs resource create ClusterIP IPaddr2 ip=192.168.122.120 cidr_netmask=24 -pcs determines the standard and provider (``ocf:heartbeat``) automatically -since ``IPaddr2`` is unique, and automatically creates operations (including +Both crmsh and pcs determine the standard and provider (``ocf:heartbeat``) automatically +since ``IPaddr2`` is unique, and automatically create operations (including monitor) based on the agent's meta-data. .. topic:: Show Configuration of All Resources @@ -270,6 +267,10 @@ edited and verified before committing to the live configuration: crmsh # crm configure ms WebDataClone WebData \ meta master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true + crmsh # crm configure clone WebDataClone WebData \ + meta promotable=true \ + promoted-max=1 promoted-node-max=1 \ + clone-max=2 clone-node-max=1 notify=true pcs-0.9 # pcs resource master WebDataClone WebData \ master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true @@ -277,6 +278,7 @@ edited and verified before committing to the live configuration: promoted-max=1 promoted-node-max=1 \ clone-max=2 clone-node-max=1 notify=true +crmsh supports both ways ('configure ms' is deprecated) to configure promotable clone since crmsh 4.4.0. 
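Whichever tool and syntax is used, the resulting CIB entry is a promotable clone along roughly these lines (a hand-written sketch; the ``id`` values are illustrative and the wrapped primitive is only indicated by a placeholder comment):

.. code-block:: xml

   <clone id="WebDataClone">
     <meta_attributes id="WebDataClone-meta">
       <nvpair id="WebDataClone-meta-promotable" name="promotable" value="true"/>
       <nvpair id="WebDataClone-meta-promoted-max" name="promoted-max" value="1"/>
       <nvpair id="WebDataClone-meta-promoted-node-max" name="promoted-node-max" value="1"/>
       <nvpair id="WebDataClone-meta-clone-max" name="clone-max" value="2"/>
       <nvpair id="WebDataClone-meta-clone-node-max" name="clone-node-max" value="1"/>
       <nvpair id="WebDataClone-meta-notify" name="notify" value="true"/>
     </meta_attributes>
     <!-- wrapped primitive (WebData) defined here -->
   </clone>
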
pcs will generate the clone name automatically if it is omitted from the command line. diff --git a/doc/sphinx/Pacemaker_Development/c.rst b/doc/sphinx/Pacemaker_Development/c.rst index 66ce3b2..b03ddae 100644 --- a/doc/sphinx/Pacemaker_Development/c.rst +++ b/doc/sphinx/Pacemaker_Development/c.rst @@ -225,8 +225,8 @@ a ``GHashTable *`` member, the argument should be marked as ``[in,out]`` if the function inserts data into the table, even if the struct members themselves are not changed. However, an argument is not ``[in,out]`` if something reachable via the argument is modified via a separate argument. For example, both -``pe_resource_t`` and ``pe_node_t`` contain pointers to their -``pe_working_set_t`` and thus indirectly to each other, but if the function +``pcmk_resource_t`` and ``pcmk_node_t`` contain pointers to their +``pcmk_scheduler_t`` and thus indirectly to each other, but if the function modifies the resource via the resource argument, the node argument does not have to be ``[in,out]``. @@ -745,10 +745,20 @@ readability and logging consistency. Functions ######### +Function Naming +_______________ + Function names should be unique across the entire project, to allow for individual tracing via ``PCMK_trace_functions``, and make it easier to search code and follow detail logs. +A common function signature is a comparison function that returns 0 if its +arguments are equal for sorting purposes, -1 if the first argument should sort +first, and 1 is the second argument should sort first. Such a function should +have ``cmp`` in its name, to parallel ``strcmp()``; ``sort`` should only be +used in the names of functions that sort an entire list (typically using a +``cmp`` function). + Function Definitions ____________________ diff --git a/doc/sphinx/Pacemaker_Development/components.rst b/doc/sphinx/Pacemaker_Development/components.rst index e14df26..5086fa8 100644 --- a/doc/sphinx/Pacemaker_Development/components.rst +++ b/doc/sphinx/Pacemaker_Development/components.rst @@ -301,7 +301,7 @@ directly. This allows them to run using a ``CIB_file`` without the cluster needing to be active. The main entry point for the scheduler code is -``lib/pacemaker/pcmk_sched_allocate.c:pcmk__schedule_actions()``. It sets +``lib/pacemaker/pcmk_scheduler.c:pcmk__schedule_actions()``. It sets defaults and calls a series of functions for the scheduling. Some key steps: * ``unpack_cib()`` parses most of the CIB XML into data structures, and @@ -315,7 +315,7 @@ defaults and calls a series of functions for the scheduling. Some key steps: the CIB status section. This is used to decide whether certain actions need to be done, such as deleting orphan resources, forcing a restart when a resource definition changes, etc. -* ``allocate_resources()`` assigns resources to nodes. +* ``assign_resources()`` assigns resources to nodes. * ``schedule_resource_actions()`` schedules resource-specific actions (which might or might not end up in the final graph). * ``pcmk__apply_orderings()`` processes ordering constraints in order to modify @@ -335,7 +335,7 @@ Working with the scheduler is difficult. Challenges include: * It produces an insane amount of log messages at debug and trace levels. You can put resource ID(s) in the ``PCMK_trace_tags`` environment variable to enable trace-level messages only when related to specific resources. 
-* Different parts of the main ``pe_working_set_t`` structure are finalized at +* Different parts of the main ``pcmk_scheduler_t`` structure are finalized at different points in the scheduling process, so you have to keep in mind whether information you're using at one point of the code can possibly change later. For example, data unpacked from the CIB can safely be used anytime @@ -347,24 +347,24 @@ Working with the scheduler is difficult. Challenges include: .. index:: - single: pe_working_set_t + single: pcmk_scheduler_t Cluster Working Set ___________________ -The main data object for the scheduler is ``pe_working_set_t``, which contains +The main data object for the scheduler is ``pcmk_scheduler_t``, which contains all information needed about nodes, resources, constraints, etc., both as the raw CIB XML and parsed into more usable data structures, plus the resulting -transition graph XML. The variable name is usually ``data_set``. +transition graph XML. The variable name is usually ``scheduler``. .. index:: - single: pe_resource_t + single: pcmk_resource_t Resources _________ -``pe_resource_t`` is the data object representing cluster resources. A resource -has a variant: primitive (a.k.a. native), group, clone, or bundle. +``pcmk_resource_t`` is the data object representing cluster resources. A +resource has a variant: primitive (a.k.a. native), group, clone, or bundle. The resource object has members for two sets of methods, ``resource_object_functions_t`` from the ``libpe_status`` public API, and @@ -374,45 +374,45 @@ The resource object has members for two sets of methods, The object functions have basic capabilities such as unpacking the resource XML, and determining the current or planned location of the resource. -The allocation functions have more obscure capabilities needed for scheduling, +The assignment functions have more obscure capabilities needed for scheduling, such as processing location and ordering constraints. For example, ``pcmk__create_internal_constraints()`` simply calls the ``internal_constraints()`` method for each top-level resource in the cluster. .. index:: - single: pe_node_t + single: pcmk_node_t Nodes _____ -Allocation of resources to nodes is done by choosing the node with the highest +Assignment of resources to nodes is done by choosing the node with the highest score for a given resource. The scheduler does a bunch of processing to -generate the scores, then the actual allocation is straightforward. +generate the scores, then the actual assignment is straightforward. -Node lists are frequently used. For example, ``pe_working_set_t`` has a +Node lists are frequently used. For example, ``pcmk_scheduler_t`` has a ``nodes`` member which is a list of all nodes in the cluster, and -``pe_resource_t`` has a ``running_on`` member which is a list of all nodes on -which the resource is (or might be) active. These are lists of ``pe_node_t`` +``pcmk_resource_t`` has a ``running_on`` member which is a list of all nodes on +which the resource is (or might be) active. These are lists of ``pcmk_node_t`` objects. -The ``pe_node_t`` object contains a ``struct pe_node_shared_s *details`` member -with all node information that is independent of resource allocation (the node -name, etc.). +The ``pcmk_node_t`` object contains a ``struct pe_node_shared_s *details`` +member with all node information that is independent of resource assignment +(the node name, etc.). The working set's ``nodes`` member contains the original of this information. 
-All other node lists contain copies of ``pe_node_t`` where only the ``details`` -member points to the originals in the working set's ``nodes`` list. In this -way, the other members of ``pe_node_t`` (such as ``weight``, which is the node -score) may vary by node list, while the common details are shared. +All other node lists contain copies of ``pcmk_node_t`` where only the +``details`` member points to the originals in the working set's ``nodes`` list. +In this way, the other members of ``pcmk_node_t`` (such as ``weight``, which is +the node score) may vary by node list, while the common details are shared. .. index:: - single: pe_action_t + single: pcmk_action_t single: pe_action_flags Actions _______ -``pe_action_t`` is the data object representing actions that might need to be +``pcmk_action_t`` is the data object representing actions that might need to be taken. These could be resource actions, cluster-wide actions such as fencing a node, or "pseudo-actions" which are abstractions used as convenient points for ordering other actions against. @@ -443,7 +443,7 @@ Colocation constraints come into play in these parts of the scheduler code: * When choosing roles for promotable clone instances, so colocations involving a specific role can affect which instances are promoted -The resource allocation functions have several methods related to colocations: +The resource assignment functions have several methods related to colocations: * ``apply_coloc_score():`` This applies a colocation's score to either the dependent's allowed node scores (if called while resources are being diff --git a/doc/sphinx/Pacemaker_Development/helpers.rst b/doc/sphinx/Pacemaker_Development/helpers.rst index 3fcb48d..6bd1926 100644 --- a/doc/sphinx/Pacemaker_Development/helpers.rst +++ b/doc/sphinx/Pacemaker_Development/helpers.rst @@ -476,14 +476,13 @@ The Pacemaker build process uses ``lcov`` and special make targets to generate an HTML coverage report that can be inspected with any web browser. To start, you'll need to install the ``lcov`` package which is included in most -distributions. Next, reconfigure and rebuild the source tree: +distributions. Next, reconfigure the source tree: .. code-block:: none $ ./configure --with-coverage - $ make -Then simply run ``make coverage``. This will do the same thing as ``make check``, +Then run ``make -C devel coverage``. This will do the same thing as ``make check``, but will generate a bunch of intermediate files as part of the compiler's output. Essentially, the coverage tools run all the unit tests and make a note if a given line if code is executed as a part of some test program. This will include not diff --git a/doc/sphinx/Pacemaker_Explained/acls.rst b/doc/sphinx/Pacemaker_Explained/acls.rst index 67d5d15..c3de39d 100644 --- a/doc/sphinx/Pacemaker_Explained/acls.rst +++ b/doc/sphinx/Pacemaker_Explained/acls.rst @@ -6,9 +6,9 @@ Access Control Lists (ACLs) --------------------------- -By default, the ``root`` user or any user in the ``haclient`` group can modify -Pacemaker's CIB without restriction. Pacemaker offers *access control lists -(ACLs)* to provide more fine-grained authorization. +By default, the ``root`` user or any user in the |CRM_DAEMON_GROUP| group can +modify Pacemaker's CIB without restriction. Pacemaker offers *access control +lists (ACLs)* to provide more fine-grained authorization. .. important:: @@ -24,7 +24,7 @@ In order to use ACLs: * The ``enable-acl`` :ref:`cluster option ` must be set to true. 
-* Desired users must have user accounts in the ``haclient`` group on all +* Desired users must have user accounts in the |CRM_DAEMON_GROUP| group on all cluster nodes in the cluster. * If your CIB was created before Pacemaker 1.1.12, it might need to be updated @@ -275,9 +275,9 @@ elements. .. important:: - The ``root`` and ``hacluster`` user accounts always have full access to the - CIB, regardless of ACLs. For all other user accounts, when ``enable-acl`` is - true, permission to all parts of the CIB is denied by default (permissions + The ``root`` and |CRM_DAEMON_USER| user accounts always have full access to + the CIB, regardless of ACLs. For all other user accounts, when ``enable-acl`` + is true, permission to all parts of the CIB is denied by default (permissions must be explicitly granted). ACL Examples @@ -436,8 +436,8 @@ the CIB, such as ``crm_attribute`` when managing permanent node attributes, ``crm_mon``, and ``cibadmin``. However, command-line tools that communicate directly with Pacemaker daemons -via IPC are not affected by ACLs. For example, users in the ``haclient`` group -may still do the following, regardless of ACLs: +via IPC are not affected by ACLs. For example, users in the |CRM_DAEMON_GROUP| +group may still do the following, regardless of ACLs: * Query transient node attribute values using ``crm_attribute`` and ``attrd_updater``. diff --git a/doc/sphinx/Pacemaker_Explained/advanced-options.rst b/doc/sphinx/Pacemaker_Explained/advanced-options.rst deleted file mode 100644 index 20ab79e..0000000 --- a/doc/sphinx/Pacemaker_Explained/advanced-options.rst +++ /dev/null @@ -1,586 +0,0 @@ -Advanced Configuration ----------------------- - -.. index:: - single: start-delay; operation attribute - single: interval-origin; operation attribute - single: interval; interval-origin - single: operation; interval-origin - single: operation; start-delay - -Specifying When Recurring Actions are Performed -############################################### - -By default, recurring actions are scheduled relative to when the resource -started. In some cases, you might prefer that a recurring action start relative -to a specific date and time. For example, you might schedule an in-depth -monitor to run once every 24 hours, and want it to run outside business hours. - -To do this, set the operation's ``interval-origin``. The cluster uses this point -to calculate the correct ``start-delay`` such that the operation will occur -at ``interval-origin`` plus a multiple of the operation interval. - -For example, if the recurring operation's interval is 24h, its -``interval-origin`` is set to 02:00, and it is currently 14:32, then the -cluster would initiate the operation after 11 hours and 28 minutes. - -The value specified for ``interval`` and ``interval-origin`` can be any -date/time conforming to the -`ISO8601 standard `_. By way of -example, to specify an operation that would run on the first Monday of -2021 and every Monday after that, you would add: - -.. topic:: Example recurring action that runs relative to base date/time - - .. code-block:: xml - - - -.. index:: - single: resource; failure recovery - single: operation; failure recovery - -.. _failure-handling: - -Handling Resource Failure -######################### - -By default, Pacemaker will attempt to recover failed resources by restarting -them. However, failure recovery is highly configurable. - -.. 
index:: - single: resource; failure count - single: operation; failure count - -Failure Counts -______________ - -Pacemaker tracks resource failures for each combination of node, resource, and -operation (start, stop, monitor, etc.). - -You can query the fail count for a particular node, resource, and/or operation -using the ``crm_failcount`` command. For example, to see how many times the -10-second monitor for ``myrsc`` has failed on ``node1``, run: - -.. code-block:: none - - # crm_failcount --query -r myrsc -N node1 -n monitor -I 10s - -If you omit the node, ``crm_failcount`` will use the local node. If you omit -the operation and interval, ``crm_failcount`` will display the sum of the fail -counts for all operations on the resource. - -You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear -fail counts. For example, to clear the above monitor failures, run: - -.. code-block:: none - - # crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s - -If you omit the resource, ``crm_resource --cleanup`` will clear failures for -all resources. If you omit the node, it will clear failures on all nodes. If -you omit the operation and interval, it will clear the failures for all -operations on the resource. - -.. note:: - - Even when cleaning up only a single operation, all failed operations will - disappear from the status display. This allows us to trigger a re-check of - the resource's current status. - -Higher-level tools may provide other commands for querying and clearing -fail counts. - -The ``crm_mon`` tool shows the current cluster status, including any failed -operations. To see the current fail counts for any failed resources, call -``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per -resource (that is, the sum of any operation fail counts for the resource). - -.. index:: - single: migration-threshold; resource meta-attribute - single: resource; migration-threshold - -Failure Response -________________ - -Normally, if a running resource fails, pacemaker will try to stop it and start -it again. Pacemaker will choose the best location to start it each time, which -may be the same node that it failed on. - -However, if a resource fails repeatedly, it is possible that there is an -underlying problem on that node, and you might desire trying a different node -in such a case. Pacemaker allows you to set your preference via the -``migration-threshold`` resource meta-attribute. [#]_ - -If you define ``migration-threshold`` to *N* for a resource, it will be banned -from the original node after *N* failures there. - -.. note:: - - The ``migration-threshold`` is per *resource*, even though fail counts are - tracked per *operation*. The operation fail counts are added together - to compare against the ``migration-threshold``. - -By default, fail counts remain until manually cleared by an administrator -using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after -first fixing the failure's cause). It is possible to have fail counts expire -automatically by setting the ``failure-timeout`` resource meta-attribute. - -.. important:: - - A successful operation does not clear past failures. If a recurring monitor - operation fails once, succeeds many times, then fails again days later, its - fail count is 2. Fail counts are cleared only by manual intervention or - failure timeout. 
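As an illustration of how the meta-attributes discussed here are expressed in the CIB, the sketch below sets ``migration-threshold`` and ``failure-timeout`` on a resource; the resource name ``myrsc`` and the ``ocf:heartbeat:Dummy`` agent are placeholders only, reusing the name from the fail-count examples above.

.. topic:: Hypothetical resource definition with failure-handling meta-attributes

   .. code-block:: xml

      <primitive id="myrsc" class="ocf" provider="heartbeat" type="Dummy">
        <meta_attributes id="myrsc-meta_attributes">
          <!-- ban the resource from a node after 2 failures there -->
          <nvpair id="myrsc-meta_attributes-migration-threshold"
                  name="migration-threshold" value="2"/>
          <!-- expire the fail count after 60 seconds with no new failure -->
          <nvpair id="myrsc-meta_attributes-failure-timeout"
                  name="failure-timeout" value="60s"/>
        </meta_attributes>
        <operations>
          <op id="myrsc-monitor-10s" name="monitor" interval="10s"/>
        </operations>
      </primitive>

The same meta-attributes can also be set from the command line, for example with ``crm_resource --resource myrsc --set-parameter migration-threshold --meta --parameter-value 2``.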
- -For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to -``60s`` would cause the resource to move to a new node after 2 failures, and -allow it to move back (depending on stickiness and constraint scores) after one -minute. - -.. note:: - - ``failure-timeout`` is measured since the most recent failure. That is, older - failures do not individually time out and lower the fail count. Instead, all - failures are timed out simultaneously (and the fail count is reset to 0) if - there is no new failure for the timeout period. - -There are two exceptions to the migration threshold: when a resource either -fails to start or fails to stop. - -If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is -the default), start failures cause the fail count to be set to ``INFINITY`` and -thus always cause the resource to move immediately. - -Stop failures are slightly different and crucial. If a resource fails to stop -and fencing is enabled, then the cluster will fence the node in order to be -able to start the resource elsewhere. If fencing is disabled, then the cluster -has no way to continue and will not try to start the resource elsewhere, but -will try to stop it again after any failure timeout or clearing. - -.. index:: - single: resource; move - -Moving Resources -################ - -Moving Resources Manually -_________________________ - -There are primarily two occasions when you would want to move a resource from -its current location: when the whole node is under maintenance, and when a -single resource needs to be moved. - -.. index:: - single: standby mode - single: node; standby mode - -Standby Mode -~~~~~~~~~~~~ - -Since everything eventually comes down to a score, you could create constraints -for every resource to prevent them from running on one node. While Pacemaker -configuration can seem convoluted at times, not even we would require this of -administrators. - -Instead, you can set a special node attribute which tells the cluster "don't -let anything run here". There is even a helpful tool to help query and set it, -called ``crm_standby``. To check the standby status of the current machine, -run: - -.. code-block:: none - - # crm_standby -G - -A value of ``on`` indicates that the node is *not* able to host any resources, -while a value of ``off`` says that it *can*. - -You can also check the status of other nodes in the cluster by specifying the -`--node` option: - -.. code-block:: none - - # crm_standby -G --node sles-2 - -To change the current node's standby status, use ``-v`` instead of ``-G``: - -.. code-block:: none - - # crm_standby -v on - -Again, you can change another host's value by supplying a hostname with -``--node``. - -A cluster node in standby mode will not run resources, but still contributes to -quorum, and may fence or be fenced by nodes. - -Moving One Resource -~~~~~~~~~~~~~~~~~~~ - -When only one resource is required to move, we could do this by creating -location constraints. However, once again we provide a user-friendly shortcut -as part of the ``crm_resource`` command, which creates and modifies the extra -constraints for you. If ``Email`` were running on ``sles-1`` and you wanted it -moved to a specific location, the command would look something like: - -.. code-block:: none - - # crm_resource -M -r Email -H sles-2 - -Behind the scenes, the tool will create the following location constraint: - -.. code-block:: xml - - - -It is important to note that subsequent invocations of ``crm_resource -M`` are -not cumulative. 
So, if you ran these commands: - -.. code-block:: none - - # crm_resource -M -r Email -H sles-2 - # crm_resource -M -r Email -H sles-3 - -then it is as if you had never performed the first command. - -To allow the resource to move back again, use: - -.. code-block:: none - - # crm_resource -U -r Email - -Note the use of the word *allow*. The resource *can* move back to its original -location, but depending on ``resource-stickiness``, location constraints, and -so forth, it might stay where it is. - -To be absolutely certain that it moves back to ``sles-1``, move it there before -issuing the call to ``crm_resource -U``: - -.. code-block:: none - - # crm_resource -M -r Email -H sles-1 - # crm_resource -U -r Email - -Alternatively, if you only care that the resource should be moved from its -current location, try: - -.. code-block:: none - - # crm_resource -B -r Email - -which will instead create a negative constraint, like: - -.. code-block:: xml - - - -This will achieve the desired effect, but will also have long-term -consequences. As the tool will warn you, the creation of a ``-INFINITY`` -constraint will prevent the resource from running on that node until -``crm_resource -U`` is used. This includes the situation where every other -cluster node is no longer available! - -In some cases, such as when ``resource-stickiness`` is set to ``INFINITY``, it -is possible that you will end up with the problem described in -:ref:`node-score-equal`. The tool can detect some of these cases and deals with -them by creating both positive and negative constraints. For example: - -.. code-block:: xml - - - - -which has the same long-term consequences as discussed earlier. - -Moving Resources Due to Connectivity Changes -____________________________________________ - -You can configure the cluster to move resources when external connectivity is -lost in two steps. - -.. index:: - single: ocf:pacemaker:ping resource - single: ping resource - -Tell Pacemaker to Monitor Connectivity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, add an ``ocf:pacemaker:ping`` resource to the cluster. The ``ping`` -resource uses the system utility of the same name to a test whether a list of -machines (specified by DNS hostname or IP address) are reachable, and uses the -results to maintain a node attribute. - -The node attribute is called ``pingd`` by default, but is customizable in order -to allow multiple ping groups to be defined. - -Normally, the ping resource should run on all cluster nodes, which means that -you'll need to create a clone. A template for this can be found below, along -with a description of the most interesting parameters. - -.. table:: **Commonly Used ocf:pacemaker:ping Resource Parameters** - :widths: 1 4 - - +--------------------+--------------------------------------------------------------+ - | Resource Parameter | Description | - +====================+==============================================================+ - | dampen | .. index:: | - | | single: ocf:pacemaker:ping resource; dampen parameter | - | | single: dampen; ocf:pacemaker:ping resource parameter | - | | | - | | The time to wait (dampening) for further changes to occur. | - | | Use this to prevent a resource from bouncing around the | - | | cluster when cluster nodes notice the loss of connectivity | - | | at slightly different times. | - +--------------------+--------------------------------------------------------------+ - | multiplier | .. 
index:: | - | | single: ocf:pacemaker:ping resource; multiplier parameter | - | | single: multiplier; ocf:pacemaker:ping resource parameter | - | | | - | | The number of connected ping nodes gets multiplied by this | - | | value to get a score. Useful when there are multiple ping | - | | nodes configured. | - +--------------------+--------------------------------------------------------------+ - | host_list | .. index:: | - | | single: ocf:pacemaker:ping resource; host_list parameter | - | | single: host_list; ocf:pacemaker:ping resource parameter | - | | | - | | The machines to contact in order to determine the current | - | | connectivity status. Allowed values include resolvable DNS | - | | connectivity host names, IPv4 addresses, and IPv6 addresses. | - +--------------------+--------------------------------------------------------------+ - -.. topic:: Example ping resource that checks node connectivity once every minute - - .. code-block:: xml - - - - - - - - - - - - - - -.. important:: - - You're only half done. The next section deals with telling Pacemaker how to - deal with the connectivity status that ``ocf:pacemaker:ping`` is recording. - -Tell Pacemaker How to Interpret the Connectivity Data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. important:: - - Before attempting the following, make sure you understand - :ref:`rules`. - -There are a number of ways to use the connectivity data. - -The most common setup is for people to have a single ping target (for example, -the service network's default gateway), to prevent the cluster from running a -resource on any unconnected node. - -.. topic:: Don't run a resource on unconnected nodes - - .. code-block:: xml - - - - - - - -A more complex setup is to have a number of ping targets configured. You can -require the cluster to only run resources on nodes that can connect to all (or -a minimum subset) of them. - -.. topic:: Run only on nodes connected to three or more ping targets - - .. code-block:: xml - - - ... - - ... - - ... - - - - - - -Alternatively, you can tell the cluster only to *prefer* nodes with the best -connectivity, by using ``score-attribute`` in the rule. Just be sure to set -``multiplier`` to a value higher than that of ``resource-stickiness`` (and -don't set either of them to ``INFINITY``). - -.. topic:: Prefer node with most connected ping nodes - - .. code-block:: xml - - - - - - - -It is perhaps easier to think of this in terms of the simple constraints that -the cluster translates it into. For example, if ``sles-1`` is connected to all -five ping nodes but ``sles-2`` is only connected to two, then it would be as if -you instead had the following constraints in your configuration: - -.. topic:: How the cluster translates the above location constraint - - .. code-block:: xml - - - - -The advantage is that you don't have to manually update any constraints -whenever your network connectivity changes. - -You can also combine the concepts above into something even more complex. The -example below shows how you can prefer the node with the most connected ping -nodes provided they have connectivity to at least three (again assuming that -``multiplier`` is set to 1000). - -.. topic:: More complex example of choosing location based on connectivity - - .. code-block:: xml - - - - - - - - - - - -.. 
_live-migration: - -Migrating Resources -___________________ - -Normally, when the cluster needs to move a resource, it fully restarts the -resource (that is, it stops the resource on the current node and starts it on -the new node). - -However, some types of resources, such as many virtual machines, are able to -move to another location without loss of state (often referred to as live -migration or hot migration). In pacemaker, this is called resource migration. -Pacemaker can be configured to migrate a resource when moving it, rather than -restarting it. - -Not all resources are able to migrate; see the -:ref:`migration checklist ` below. Even those that can, -won't do so in all situations. Conceptually, there are two requirements from -which the other prerequisites follow: - -* The resource must be active and healthy at the old location; and -* everything required for the resource to run must be available on both the old - and new locations. - -The cluster is able to accommodate both *push* and *pull* migration models by -requiring the resource agent to support two special actions: ``migrate_to`` -(performed on the current location) and ``migrate_from`` (performed on the -destination). - -In push migration, the process on the current location transfers the resource -to the new location where is it later activated. In this scenario, most of the -work would be done in the ``migrate_to`` action and, if anything, the -activation would occur during ``migrate_from``. - -Conversely for pull, the ``migrate_to`` action is practically empty and -``migrate_from`` does most of the work, extracting the relevant resource state -from the old location and activating it. - -There is no wrong or right way for a resource agent to implement migration, as -long as it works. - -.. _migration_checklist: - -.. topic:: Migration Checklist - - * The resource may not be a clone. - * The resource agent standard must be OCF. - * The resource must not be in a failed or degraded state. - * The resource agent must support ``migrate_to`` and ``migrate_from`` - actions, and advertise them in its meta-data. - * The resource must have the ``allow-migrate`` meta-attribute set to - ``true`` (which is not the default). - -If an otherwise migratable resource depends on another resource via an ordering -constraint, there are special situations in which it will be restarted rather -than migrated. - -For example, if the resource depends on a clone, and at the time the resource -needs to be moved, the clone has instances that are stopping and instances that -are starting, then the resource will be restarted. The scheduler is not yet -able to model this situation correctly and so takes the safer (if less optimal) -path. - -Also, if a migratable resource depends on a non-migratable resource, and both -need to be moved, the migratable resource will be restarted. - - -.. index:: - single: reload - single: reload-agent - -Reloading an Agent After a Definition Change -############################################ - -The cluster automatically detects changes to the configuration of active -resources. The cluster's normal response is to stop the service (using the old -definition) and start it again (with the new definition). This works, but some -resource agents are smarter and can be told to use a new set of options without -restarting. - -To take advantage of this capability, the resource agent must: - -* Implement the ``reload-agent`` action. What it should do depends completely - on your application! - - .. 
note:: - - Resource agents may also implement a ``reload`` action to make the managed - service reload its own *native* configuration. This is different from - ``reload-agent``, which makes effective changes in the resource's - *Pacemaker* configuration (specifically, the values of the agent's - reloadable parameters). - -* Advertise the ``reload-agent`` operation in the ``actions`` section of its - meta-data. - -* Set the ``reloadable`` attribute to 1 in the ``parameters`` section of - its meta-data for any parameters eligible to be reloaded after a change. - -Once these requirements are satisfied, the cluster will automatically know to -reload the resource (instead of restarting) when a reloadable parameter -changes. - -.. note:: - - Metadata will not be re-read unless the resource needs to be started. If you - edit the agent of an already active resource to set a parameter reloadable, - the resource may restart the first time the parameter value changes. - -.. note:: - - If both a reloadable and non-reloadable parameter are changed - simultaneously, the resource will be restarted. - -.. rubric:: Footnotes - -.. [#] The naming of this option was perhaps unfortunate as it is easily - confused with live migration, the process of moving a resource from one - node to another without stopping it. Xen virtual guests are the most - common example of resources that can be migrated in this manner. diff --git a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst b/doc/sphinx/Pacemaker_Explained/advanced-resources.rst deleted file mode 100644 index a61b76d..0000000 --- a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst +++ /dev/null @@ -1,1629 +0,0 @@ -Advanced Resource Types ------------------------ - -.. index: - single: group resource - single: resource; group - -.. _group-resources: - -Groups - A Syntactic Shortcut -############################# - -One of the most common elements of a cluster is a set of resources -that need to be located together, start sequentially, and stop in the -reverse order. To simplify this configuration, we support the concept -of groups. - -.. topic:: A group of two primitive resources - - .. code-block:: xml - - - - - - - - - - -Although the example above contains only two resources, there is no -limit to the number of resources a group can contain. The example is -also sufficient to explain the fundamental properties of a group: - -* Resources are started in the order they appear in (**Public-IP** first, - then **Email**) -* Resources are stopped in the reverse order to which they appear in - (**Email** first, then **Public-IP**) - -If a resource in the group can't run anywhere, then nothing after that -is allowed to run, too. - -* If **Public-IP** can't run anywhere, neither can **Email**; -* but if **Email** can't run anywhere, this does not affect **Public-IP** - in any way - -The group above is logically equivalent to writing: - -.. topic:: How the cluster sees a group resource - - .. code-block:: xml - - - - - - - - - - - - - - - - -Obviously as the group grows bigger, the reduced configuration effort -can become significant. - -Another (typical) example of a group is a DRBD volume, the filesystem -mount, an IP address, and an application that uses them. - -.. index:: - pair: XML element; group - -Group Properties -________________ - -.. 
table:: **Properties of a Group Resource** - :widths: 1 4 - - +-------------+------------------------------------------------------------------+ - | Field | Description | - +=============+==================================================================+ - | id | .. index:: | - | | single: group; property, id | - | | single: property; id (group) | - | | single: id; group property | - | | | - | | A unique name for the group | - +-------------+------------------------------------------------------------------+ - | description | .. index:: | - | | single: group; attribute, description | - | | single: attribute; description (group) | - | | single: description; group attribute | - | | | - | | An optional description of the group, for the user's own | - | | purposes. | - | | E.g. ``resources needed for website`` | - +-------------+------------------------------------------------------------------+ - -Group Options -_____________ - -Groups inherit the ``priority``, ``target-role``, and ``is-managed`` properties -from primitive resources. See :ref:`resource_options` for information about -those properties. - -Group Instance Attributes -_________________________ - -Groups have no instance attributes. However, any that are set for the group -object will be inherited by the group's children. - -Group Contents -______________ - -Groups may only contain a collection of cluster resources (see -:ref:`primitive-resource`). To refer to a child of a group resource, just use -the child's ``id`` instead of the group's. - -Group Constraints -_________________ - -Although it is possible to reference a group's children in -constraints, it is usually preferable to reference the group itself. - -.. topic:: Some constraints involving groups - - .. code-block:: xml - - - - - - - -.. index:: - pair: resource-stickiness; group - -Group Stickiness -________________ - -Stickiness, the measure of how much a resource wants to stay where it -is, is additive in groups. Every active resource of the group will -contribute its stickiness value to the group's total. So if the -default ``resource-stickiness`` is 100, and a group has seven members, -five of which are active, then the group as a whole will prefer its -current location with a score of 500. - -.. index:: - single: clone - single: resource; clone - -.. _s-resource-clone: - -Clones - Resources That Can Have Multiple Active Instances -########################################################## - -*Clone* resources are resources that can have more than one copy active at the -same time. This allows you, for example, to run a copy of a daemon on every -node. You can clone any primitive or group resource [#]_. - -Anonymous versus Unique Clones -______________________________ - -A clone resource is configured to be either *anonymous* or *globally unique*. - -Anonymous clones are the simplest. These behave completely identically -everywhere they are running. Because of this, there can be only one instance of -an anonymous clone active per node. - -The instances of globally unique clones are distinct entities. All instances -are launched identically, but one instance of the clone is not identical to any -other instance, whether running on the same node or a different node. As an -example, a cloned IP address can use special kernel functionality such that -each instance handles a subset of requests for the same IP address. - -.. index:: - single: promotable clone - single: resource; promotable - -.. 
_s-resource-promotable: - -Promotable clones -_________________ - -If a clone is *promotable*, its instances can perform a special role that -Pacemaker will manage via the ``promote`` and ``demote`` actions of the resource -agent. - -Services that support such a special role have various terms for the special -role and the default role: primary and secondary, master and replica, -controller and worker, etc. Pacemaker uses the terms *promoted* and -*unpromoted* to be agnostic to what the service calls them or what they do. - -All that Pacemaker cares about is that an instance comes up in the unpromoted role -when started, and the resource agent supports the ``promote`` and ``demote`` actions -to manage entering and exiting the promoted role. - -.. index:: - pair: XML element; clone - -Clone Properties -________________ - -.. table:: **Properties of a Clone Resource** - :widths: 1 4 - - +-------------+------------------------------------------------------------------+ - | Field | Description | - +=============+==================================================================+ - | id | .. index:: | - | | single: clone; property, id | - | | single: property; id (clone) | - | | single: id; clone property | - | | | - | | A unique name for the clone | - +-------------+------------------------------------------------------------------+ - | description | .. index:: | - | | single: clone; attribute, description | - | | single: attribute; description (clone) | - | | single: description; clone attribute | - | | | - | | An optional description of the clone, for the user's own | - | | purposes. | - | | E.g. ``IP address for website`` | - +-------------+------------------------------------------------------------------+ - -.. index:: - pair: options; clone - -Clone Options -_____________ - -:ref:`Options ` inherited from primitive resources: -``priority, target-role, is-managed`` - -.. table:: **Clone-specific configuration options** - :class: longtable - :widths: 1 1 3 - - +-------------------+-----------------+-------------------------------------------------------+ - | Field | Default | Description | - +===================+=================+=======================================================+ - | globally-unique | false | .. index:: | - | | | single: clone; option, globally-unique | - | | | single: option; globally-unique (clone) | - | | | single: globally-unique; clone option | - | | | | - | | | If **true**, each clone instance performs a | - | | | distinct function | - +-------------------+-----------------+-------------------------------------------------------+ - | clone-max | 0 | .. index:: | - | | | single: clone; option, clone-max | - | | | single: option; clone-max (clone) | - | | | single: clone-max; clone option | - | | | | - | | | The maximum number of clone instances that can | - | | | be started across the entire cluster. If 0, the | - | | | number of nodes in the cluster will be used. | - +-------------------+-----------------+-------------------------------------------------------+ - | clone-node-max | 1 | .. index:: | - | | | single: clone; option, clone-node-max | - | | | single: option; clone-node-max (clone) | - | | | single: clone-node-max; clone option | - | | | | - | | | If ``globally-unique`` is **true**, the maximum | - | | | number of clone instances that can be started | - | | | on a single node | - +-------------------+-----------------+-------------------------------------------------------+ - | clone-min | 0 | .. 
index:: | - | | | single: clone; option, clone-min | - | | | single: option; clone-min (clone) | - | | | single: clone-min; clone option | - | | | | - | | | Require at least this number of clone instances | - | | | to be runnable before allowing resources | - | | | depending on the clone to be runnable. A value | - | | | of 0 means require all clone instances to be | - | | | runnable. | - +-------------------+-----------------+-------------------------------------------------------+ - | notify | false | .. index:: | - | | | single: clone; option, notify | - | | | single: option; notify (clone) | - | | | single: notify; clone option | - | | | | - | | | Call the resource agent's **notify** action for | - | | | all active instances, before and after starting | - | | | or stopping any clone instance. The resource | - | | | agent must support this action. | - | | | Allowed values: **false**, **true** | - +-------------------+-----------------+-------------------------------------------------------+ - | ordered | false | .. index:: | - | | | single: clone; option, ordered | - | | | single: option; ordered (clone) | - | | | single: ordered; clone option | - | | | | - | | | If **true**, clone instances must be started | - | | | sequentially instead of in parallel. | - | | | Allowed values: **false**, **true** | - +-------------------+-----------------+-------------------------------------------------------+ - | interleave | false | .. index:: | - | | | single: clone; option, interleave | - | | | single: option; interleave (clone) | - | | | single: interleave; clone option | - | | | | - | | | When this clone is ordered relative to another | - | | | clone, if this option is **false** (the default), | - | | | the ordering is relative to *all* instances of | - | | | the other clone, whereas if this option is | - | | | **true**, the ordering is relative only to | - | | | instances on the same node. | - | | | Allowed values: **false**, **true** | - +-------------------+-----------------+-------------------------------------------------------+ - | promotable | false | .. index:: | - | | | single: clone; option, promotable | - | | | single: option; promotable (clone) | - | | | single: promotable; clone option | - | | | | - | | | If **true**, clone instances can perform a | - | | | special role that Pacemaker will manage via the | - | | | resource agent's **promote** and **demote** | - | | | actions. The resource agent must support these | - | | | actions. | - | | | Allowed values: **false**, **true** | - +-------------------+-----------------+-------------------------------------------------------+ - | promoted-max | 1 | .. index:: | - | | | single: clone; option, promoted-max | - | | | single: option; promoted-max (clone) | - | | | single: promoted-max; clone option | - | | | | - | | | If ``promotable`` is **true**, the number of | - | | | instances that can be promoted at one time | - | | | across the entire cluster | - +-------------------+-----------------+-------------------------------------------------------+ - | promoted-node-max | 1 | .. index:: | - | | | single: clone; option, promoted-node-max | - | | | single: option; promoted-node-max (clone) | - | | | single: promoted-node-max; clone option | - | | | | - | | | If ``promotable`` is **true** and ``globally-unique`` | - | | | is **false**, the number of clone instances can be | - | | | promoted at one time on a single node | - +-------------------+-----------------+-------------------------------------------------------+ - -.. 
note:: **Deprecated Terminology** - - In older documentation and online examples, you may see promotable clones - referred to as *multi-state*, *stateful*, or *master/slave*; these mean the - same thing as *promotable*. Certain syntax is supported for backward - compatibility, but is deprecated and will be removed in a future version: - - * Using a ``master`` tag, instead of a ``clone`` tag with the ``promotable`` - meta-attribute set to ``true`` - * Using the ``master-max`` meta-attribute instead of ``promoted-max`` - * Using the ``master-node-max`` meta-attribute instead of - ``promoted-node-max`` - * Using ``Master`` as a role name instead of ``Promoted`` - * Using ``Slave`` as a role name instead of ``Unpromoted`` - - -Clone Contents -______________ - -Clones must contain exactly one primitive or group resource. - -.. topic:: A clone that runs a web server on all nodes - - .. code-block:: xml - - - - - - - - - -.. warning:: - - You should never reference the name of a clone's child (the primitive or group - resource being cloned). If you think you need to do this, you probably need to - re-evaluate your design. - -Clone Instance Attribute -________________________ - -Clones have no instance attributes; however, any that are set here will be -inherited by the clone's child. - -.. index:: - single: clone; constraint - -Clone Constraints -_________________ - -In most cases, a clone will have a single instance on each active cluster -node. If this is not the case, you can indicate which nodes the -cluster should preferentially assign copies to with resource location -constraints. These constraints are written no differently from those -for primitive resources except that the clone's **id** is used. - -.. topic:: Some constraints involving clones - - .. code-block:: xml - - - - - - - -Ordering constraints behave slightly differently for clones. In the -example above, ``apache-stats`` will wait until all copies of ``apache-clone`` -that need to be started have done so before being started itself. -Only if *no* copies can be started will ``apache-stats`` be prevented -from being active. Additionally, the clone will wait for -``apache-stats`` to be stopped before stopping itself. - -Colocation of a primitive or group resource with a clone means that -the resource can run on any node with an active instance of the clone. -The cluster will choose an instance based on where the clone is running and -the resource's own location preferences. - -Colocation between clones is also possible. If one clone **A** is colocated -with another clone **B**, the set of allowed locations for **A** is limited to -nodes on which **B** is (or will be) active. Placement is then performed -normally. - -.. index:: - single: promotable clone; constraint - -.. _promotable-clone-constraints: - -Promotable Clone Constraints -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For promotable clone resources, the ``first-action`` and/or ``then-action`` fields -for ordering constraints may be set to ``promote`` or ``demote`` to constrain the -promoted role, and colocation constraints may contain ``rsc-role`` and/or -``with-rsc-role`` fields. - -.. topic:: Constraints involving promotable clone resources - - .. code-block:: xml - - - - - - - - - -In the example above, **myApp** will wait until one of the database -copies has been started and promoted before being started -itself on the same node. Only if no copies can be promoted will **myApp** be -prevented from being active. 
Additionally, the cluster will wait for -**myApp** to be stopped before demoting the database. - -Colocation of a primitive or group resource with a promotable clone -resource means that it can run on any node with an active instance of -the promotable clone resource that has the specified role (``Promoted`` or -``Unpromoted``). In the example above, the cluster will choose a location -based on where database is running in the promoted role, and if there are -multiple promoted instances it will also factor in **myApp**'s own location -preferences when deciding which location to choose. - -Colocation with regular clones and other promotable clone resources is also -possible. In such cases, the set of allowed locations for the **rsc** -clone is (after role filtering) limited to nodes on which the -``with-rsc`` promotable clone resource is (or will be) in the specified role. -Placement is then performed as normal. - -Using Promotable Clone Resources in Colocation Sets -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When a promotable clone is used in a :ref:`resource set ` -inside a colocation constraint, the resource set may take a ``role`` attribute. - -In the following example, an instance of **B** may be promoted only on a node -where **A** is in the promoted role. Additionally, resources **C** and **D** -must be located on a node where both **A** and **B** are promoted. - -.. topic:: Colocate C and D with A's and B's promoted instances - - .. code-block:: xml - - - - - - - - - - - - - - -Using Promotable Clone Resources in Ordered Sets -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When a promotable clone is used in a :ref:`resource set ` -inside an ordering constraint, the resource set may take an ``action`` -attribute. - -.. topic:: Start C and D after first promoting A and B - - .. code-block:: xml - - - - - - - - - - - - - - -In the above example, **B** cannot be promoted until **A** has been promoted. -Additionally, resources **C** and **D** must wait until **A** and **B** have -been promoted before they can start. - -.. index:: - pair: resource-stickiness; clone - -.. _s-clone-stickiness: - -Clone Stickiness -________________ - -To achieve a stable allocation pattern, clones are slightly sticky by -default. If no value for ``resource-stickiness`` is provided, the clone -will use a value of 1. Being a small value, it causes minimal -disturbance to the score calculations of other resources but is enough -to prevent Pacemaker from needlessly moving copies around the cluster. - -.. note:: - - For globally unique clones, this may result in multiple instances of the - clone staying on a single node, even after another eligible node becomes - active (for example, after being put into standby mode then made active again). - If you do not want this behavior, specify a ``resource-stickiness`` of 0 - for the clone temporarily and let the cluster adjust, then set it back - to 1 if you want the default behavior to apply again. - -.. important:: - - If ``resource-stickiness`` is set in the ``rsc_defaults`` section, it will - apply to clone instances as well. This means an explicit ``resource-stickiness`` - of 0 in ``rsc_defaults`` works differently from the implicit default used when - ``resource-stickiness`` is not specified. - -Clone Resource Agent Requirements -_________________________________ - -Any resource can be used as an anonymous clone, as it requires no -additional support from the resource agent. 
Whether it makes sense to -do so depends on your resource and its resource agent. - -Resource Agent Requirements for Globally Unique Clones -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Globally unique clones require additional support in the resource agent. In -particular, it must only respond with ``${OCF_SUCCESS}`` if the node has that -exact instance active. All other probes for instances of the clone should -result in ``${OCF_NOT_RUNNING}`` (or one of the other OCF error codes if -they are failed). - -Individual instances of a clone are identified by appending a colon and a -numerical offset, e.g. **apache:2**. - -Resource agents can find out how many copies there are by examining -the ``OCF_RESKEY_CRM_meta_clone_max`` environment variable and which -instance it is by examining ``OCF_RESKEY_CRM_meta_clone``. - -The resource agent must not make any assumptions (based on -``OCF_RESKEY_CRM_meta_clone``) about which numerical instances are active. In -particular, the list of active copies will not always be an unbroken -sequence, nor always start at 0. - -Resource Agent Requirements for Promotable Clones -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Promotable clone resources require two extra actions, ``demote`` and ``promote``, -which are responsible for changing the state of the resource. Like **start** and -**stop**, they should return ``${OCF_SUCCESS}`` if they completed successfully or -a relevant error code if they did not. - -The states can mean whatever you wish, but when the resource is -started, it must come up in the unpromoted role. From there, the -cluster will decide which instances to promote. - -In addition to the clone requirements for monitor actions, agents must -also *accurately* report which state they are in. The cluster relies -on the agent to report its status (including role) accurately and does -not indicate to the agent what role it currently believes it to be in. - -.. table:: **Role implications of OCF return codes** - :widths: 1 3 - - +----------------------+--------------------------------------------------+ - | Monitor Return Code | Description | - +======================+==================================================+ - | OCF_NOT_RUNNING | .. index:: | - | | single: OCF_NOT_RUNNING | - | | single: OCF return code; OCF_NOT_RUNNING | - | | | - | | Stopped | - +----------------------+--------------------------------------------------+ - | OCF_SUCCESS | .. index:: | - | | single: OCF_SUCCESS | - | | single: OCF return code; OCF_SUCCESS | - | | | - | | Running (Unpromoted) | - +----------------------+--------------------------------------------------+ - | OCF_RUNNING_PROMOTED | .. index:: | - | | single: OCF_RUNNING_PROMOTED | - | | single: OCF return code; OCF_RUNNING_PROMOTED | - | | | - | | Running (Promoted) | - +----------------------+--------------------------------------------------+ - | OCF_FAILED_PROMOTED | .. index:: | - | | single: OCF_FAILED_PROMOTED | - | | single: OCF return code; OCF_FAILED_PROMOTED | - | | | - | | Failed (Promoted) | - +----------------------+--------------------------------------------------+ - | Other | .. 
index:: | - | | single: return code | - | | | - | | Failed (Unpromoted) | - +----------------------+--------------------------------------------------+ - -Clone Notifications -~~~~~~~~~~~~~~~~~~~ - -If the clone has the ``notify`` meta-attribute set to **true**, and the resource -agent supports the ``notify`` action, Pacemaker will call the action when -appropriate, passing a number of extra variables which, when combined with -additional context, can be used to calculate the current state of the cluster -and what is about to happen to it. - -.. index:: - single: clone; environment variables - single: notify; environment variables - -.. table:: **Environment variables supplied with Clone notify actions** - :widths: 1 1 - - +----------------------------------------------+-------------------------------------------------------------------------------+ - | Variable | Description | - +==============================================+===============================================================================+ - | OCF_RESKEY_CRM_meta_notify_type | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_type | - | | single: OCF_RESKEY_CRM_meta_notify_type | - | | | - | | Allowed values: **pre**, **post** | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_operation | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_operation | - | | single: OCF_RESKEY_CRM_meta_notify_operation | - | | | - | | Allowed values: **start**, **stop** | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_start_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_resource | - | | single: OCF_RESKEY_CRM_meta_notify_start_resource | - | | | - | | Resources to be started | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_stop_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_resource | - | | single: OCF_RESKEY_CRM_meta_notify_stop_resource | - | | | - | | Resources to be stopped | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_active_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_resource | - | | single: OCF_RESKEY_CRM_meta_notify_active_resource | - | | | - | | Resources that are running | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_inactive_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_inactive_resource | - | | single: OCF_RESKEY_CRM_meta_notify_inactive_resource | - | | | - | | Resources that are not running | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_start_uname | .. 
index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_uname | - | | single: OCF_RESKEY_CRM_meta_notify_start_uname | - | | | - | | Nodes on which resources will be started | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_stop_uname | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_uname | - | | single: OCF_RESKEY_CRM_meta_notify_stop_uname | - | | | - | | Nodes on which resources will be stopped | - +----------------------------------------------+-------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_active_uname | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_uname | - | | single: OCF_RESKEY_CRM_meta_notify_active_uname | - | | | - | | Nodes on which resources are running | - +----------------------------------------------+-------------------------------------------------------------------------------+ - -The variables come in pairs, such as -``OCF_RESKEY_CRM_meta_notify_start_resource`` and -``OCF_RESKEY_CRM_meta_notify_start_uname``, and should be treated as an -array of whitespace-separated elements. - -``OCF_RESKEY_CRM_meta_notify_inactive_resource`` is an exception, as the -matching **uname** variable does not exist since inactive resources -are not running on any node. - -Thus, in order to indicate that **clone:0** will be started on **sles-1**, -**clone:2** will be started on **sles-3**, and **clone:3** will be started -on **sles-2**, the cluster would set: - -.. topic:: Notification variables - - .. code-block:: none - - OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" - OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" - -.. note:: - - Pacemaker will log but otherwise ignore failures of notify actions. - -Interpretation of Notification Variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Pre-notification (stop):** - -* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` -* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -**Post-notification (stop) / Pre-notification (start):** - -* Active resources - - * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -* Inactive resources - - * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -**Post-notification (start):** - -* Active resources: - - * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Inactive resources: - - * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -Extra Notifications for Promotable Clones -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
index:: - single: clone; environment variables - single: promotable; environment variables - -.. table:: **Extra environment variables supplied for promotable clones** - :widths: 1 1 - - +------------------------------------------------+---------------------------------------------------------------------------------+ - | Variable | Description | - +================================================+=================================================================================+ - | OCF_RESKEY_CRM_meta_notify_promoted_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_resource | - | | single: OCF_RESKEY_CRM_meta_notify_promoted_resource | - | | | - | | Resources that are running in the promoted role | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_unpromoted_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_resource | - | | single: OCF_RESKEY_CRM_meta_notify_unpromoted_resource | - | | | - | | Resources that are running in the unpromoted role | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_promote_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_resource | - | | single: OCF_RESKEY_CRM_meta_notify_promote_resource | - | | | - | | Resources to be promoted | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_demote_resource | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_resource | - | | single: OCF_RESKEY_CRM_meta_notify_demote_resource | - | | | - | | Resources to be demoted | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_promote_uname | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_uname | - | | single: OCF_RESKEY_CRM_meta_notify_promote_uname | - | | | - | | Nodes on which resources will be promoted | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_demote_uname | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_uname | - | | single: OCF_RESKEY_CRM_meta_notify_demote_uname | - | | | - | | Nodes on which resources will be demoted | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_promoted_uname | .. index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_uname | - | | single: OCF_RESKEY_CRM_meta_notify_promoted_uname | - | | | - | | Nodes on which resources are running in the promoted role | - +------------------------------------------------+---------------------------------------------------------------------------------+ - | OCF_RESKEY_CRM_meta_notify_unpromoted_uname | .. 
index:: | - | | single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_uname | - | | single: OCF_RESKEY_CRM_meta_notify_unpromoted_uname | - | | | - | | Nodes on which resources are running in the unpromoted role | - +------------------------------------------------+---------------------------------------------------------------------------------+ - -Interpretation of Promotable Notification Variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Pre-notification (demote):** - -* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` -* Promoted resources: ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` -* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` -* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -**Post-notification (demote) / Pre-notification (stop):** - -* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` -* Promoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` - -* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` -* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` -* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` - -**Post-notification (stop) / Pre-notification (start)** - -* Active resources: - - * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -* Promoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` - -* Unpromoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -* Inactive resources: - - * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` -* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -**Post-notification (start) / Pre-notification (promote)** - -* Active resources: - - * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Promoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` - -* Unpromoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * plus 
``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Inactive resources: - - * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` -* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -**Post-notification (promote)** - -* Active resources: - - * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Promoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` - -* Unpromoted resources: - - * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` - -* Inactive resources: - - * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` - * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` - -* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` -* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` -* Resources that were promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` -* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` -* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` - -Monitoring Promotable Clone Resources -_____________________________________ - -The usual monitor actions are insufficient to monitor a promotable clone -resource, because Pacemaker needs to verify not only that the resource is -active, but also that its actual role matches its intended one. - -Define two monitoring actions: the usual one will cover the unpromoted role, -and an additional one with ``role="Promoted"`` will cover the promoted role. - -.. topic:: Monitoring both states of a promotable clone resource - - .. code-block:: xml - - - - - - - - - - - - - -.. important:: - - It is crucial that *every* monitor operation has a different interval! - Pacemaker currently differentiates between operations - only by resource and interval; so if (for example) a promotable clone resource - had the same monitor interval for both roles, Pacemaker would ignore the - role when checking the status -- which would cause unexpected return - codes, and therefore unnecessary complications. - -.. 
_s-promotion-scores: - -Determining Which Instance is Promoted -______________________________________ - -Pacemaker can choose a promotable clone instance to be promoted in one of two -ways: - -* Promotion scores: These are node attributes set via the ``crm_attribute`` - command using the ``--promotion`` option, which generally would be called by - the resource agent's start action if it supports promotable clones. This tool - automatically detects both the resource and host, and should be used to set a - preference for being promoted. Based on this, ``promoted-max``, and - ``promoted-node-max``, the instance(s) with the highest preference will be - promoted. - -* Constraints: Location constraints can indicate which nodes are most preferred - to be promoted. - -.. topic:: Explicitly preferring node1 to be promoted - - .. code-block:: xml - - - -.. index: - single: bundle - single: resource; bundle - pair: container; Docker - pair: container; podman - pair: container; rkt - -.. _s-resource-bundle: - -Bundles - Containerized Resources -################################# - -Pacemaker supports a special syntax for launching a service inside a -`container `_ -with any infrastructure it requires: the *bundle*. - -Pacemaker bundles support `Docker `_, -`podman `_ *(since 2.0.1)*, and -`rkt `_ container technologies. [#]_ - -.. topic:: A bundle for a containerized web server - - .. code-block:: xml - - - - - - - - - - - - - - -Bundle Prerequisites -____________________ - -Before configuring a bundle in Pacemaker, the user must install the appropriate -container launch technology (Docker, podman, or rkt), and supply a fully -configured container image, on every node allowed to run the bundle. - -Pacemaker will create an implicit resource of type **ocf:heartbeat:docker**, -**ocf:heartbeat:podman**, or **ocf:heartbeat:rkt** to manage a bundle's -container. The user must ensure that the appropriate resource agent is -installed on every node allowed to run the bundle. - -.. index:: - pair: XML element; bundle - -Bundle Properties -_________________ - -.. table:: **XML Attributes of a bundle Element** - :widths: 1 4 - - +-------------+------------------------------------------------------------------+ - | Field | Description | - +=============+==================================================================+ - | id | .. index:: | - | | single: bundle; attribute, id | - | | single: attribute; id (bundle) | - | | single: id; bundle attribute | - | | | - | | A unique name for the bundle (required) | - +-------------+------------------------------------------------------------------+ - | description | .. index:: | - | | single: bundle; attribute, description | - | | single: attribute; description (bundle) | - | | single: description; bundle attribute | - | | | - | | An optional description of the group, for the user's own | - | | purposes. | - | | E.g. ``manages the container that runs the service`` | - +-------------+------------------------------------------------------------------+ - - -A bundle must contain exactly one ``docker``, ``podman``, or ``rkt`` element. - -.. index:: - pair: XML element; docker - pair: XML element; podman - pair: XML element; rkt - -Bundle Container Properties -___________________________ - -.. 
table:: **XML attributes of a docker, podman, or rkt Element** - :class: longtable - :widths: 2 3 4 - - +-------------------+------------------------------------+---------------------------------------------------+ - | Attribute | Default | Description | - +===================+====================================+===================================================+ - | image | | .. index:: | - | | | single: docker; attribute, image | - | | | single: attribute; image (docker) | - | | | single: image; docker attribute | - | | | single: podman; attribute, image | - | | | single: attribute; image (podman) | - | | | single: image; podman attribute | - | | | single: rkt; attribute, image | - | | | single: attribute; image (rkt) | - | | | single: image; rkt attribute | - | | | | - | | | Container image tag (required) | - +-------------------+------------------------------------+---------------------------------------------------+ - | replicas | Value of ``promoted-max`` | .. index:: | - | | if that is positive, else 1 | single: docker; attribute, replicas | - | | | single: attribute; replicas (docker) | - | | | single: replicas; docker attribute | - | | | single: podman; attribute, replicas | - | | | single: attribute; replicas (podman) | - | | | single: replicas; podman attribute | - | | | single: rkt; attribute, replicas | - | | | single: attribute; replicas (rkt) | - | | | single: replicas; rkt attribute | - | | | | - | | | A positive integer specifying the number of | - | | | container instances to launch | - +-------------------+------------------------------------+---------------------------------------------------+ - | replicas-per-host | 1 | .. index:: | - | | | single: docker; attribute, replicas-per-host | - | | | single: attribute; replicas-per-host (docker) | - | | | single: replicas-per-host; docker attribute | - | | | single: podman; attribute, replicas-per-host | - | | | single: attribute; replicas-per-host (podman) | - | | | single: replicas-per-host; podman attribute | - | | | single: rkt; attribute, replicas-per-host | - | | | single: attribute; replicas-per-host (rkt) | - | | | single: replicas-per-host; rkt attribute | - | | | | - | | | A positive integer specifying the number of | - | | | container instances allowed to run on a | - | | | single node | - +-------------------+------------------------------------+---------------------------------------------------+ - | promoted-max | 0 | .. index:: | - | | | single: docker; attribute, promoted-max | - | | | single: attribute; promoted-max (docker) | - | | | single: promoted-max; docker attribute | - | | | single: podman; attribute, promoted-max | - | | | single: attribute; promoted-max (podman) | - | | | single: promoted-max; podman attribute | - | | | single: rkt; attribute, promoted-max | - | | | single: attribute; promoted-max (rkt) | - | | | single: promoted-max; rkt attribute | - | | | | - | | | A non-negative integer that, if positive, | - | | | indicates that the containerized service | - | | | should be treated as a promotable service, | - | | | with this many replicas allowed to run the | - | | | service in the promoted role | - +-------------------+------------------------------------+---------------------------------------------------+ - | network | | .. 
index:: | - | | | single: docker; attribute, network | - | | | single: attribute; network (docker) | - | | | single: network; docker attribute | - | | | single: podman; attribute, network | - | | | single: attribute; network (podman) | - | | | single: network; podman attribute | - | | | single: rkt; attribute, network | - | | | single: attribute; network (rkt) | - | | | single: network; rkt attribute | - | | | | - | | | If specified, this will be passed to the | - | | | ``docker run``, ``podman run``, or | - | | | ``rkt run`` command as the network setting | - | | | for the container. | - +-------------------+------------------------------------+---------------------------------------------------+ - | run-command | ``/usr/sbin/pacemaker-remoted`` if | .. index:: | - | | bundle contains a **primitive**, | single: docker; attribute, run-command | - | | otherwise none | single: attribute; run-command (docker) | - | | | single: run-command; docker attribute | - | | | single: podman; attribute, run-command | - | | | single: attribute; run-command (podman) | - | | | single: run-command; podman attribute | - | | | single: rkt; attribute, run-command | - | | | single: attribute; run-command (rkt) | - | | | single: run-command; rkt attribute | - | | | | - | | | This command will be run inside the container | - | | | when launching it ("PID 1"). If the bundle | - | | | contains a **primitive**, this command *must* | - | | | start ``pacemaker-remoted`` (but could, for | - | | | example, be a script that does other stuff, too). | - +-------------------+------------------------------------+---------------------------------------------------+ - | options | | .. index:: | - | | | single: docker; attribute, options | - | | | single: attribute; options (docker) | - | | | single: options; docker attribute | - | | | single: podman; attribute, options | - | | | single: attribute; options (podman) | - | | | single: options; podman attribute | - | | | single: rkt; attribute, options | - | | | single: attribute; options (rkt) | - | | | single: options; rkt attribute | - | | | | - | | | Extra command-line options to pass to the | - | | | ``docker run``, ``podman run``, or ``rkt run`` | - | | | command | - +-------------------+------------------------------------+---------------------------------------------------+ - -.. note:: - - Considerations when using cluster configurations or container images from - Pacemaker 1.1: - - * If the container image has a pre-2.0.0 version of Pacemaker, set ``run-command`` - to ``/usr/sbin/pacemaker_remoted`` (note the underbar instead of dash). - - * ``masters`` is accepted as an alias for ``promoted-max``, but is deprecated since - 2.0.0, and support for it will be removed in a future version. - -Bundle Network Properties -_________________________ - -A bundle may optionally contain one ```` element. - -.. index:: - pair: XML element; network - single: bundle; network - -.. table:: **XML attributes of a network Element** - :widths: 2 1 5 - - +----------------+---------+------------------------------------------------------------+ - | Attribute | Default | Description | - +================+=========+============================================================+ - | add-host | TRUE | .. 
index:: | - | | | single: network; attribute, add-host | - | | | single: attribute; add-host (network) | - | | | single: add-host; network attribute | - | | | | - | | | If TRUE, and ``ip-range-start`` is used, Pacemaker will | - | | | automatically ensure that ``/etc/hosts`` inside the | - | | | containers has entries for each | - | | | :ref:`replica name ` | - | | | and its assigned IP. | - +----------------+---------+------------------------------------------------------------+ - | ip-range-start | | .. index:: | - | | | single: network; attribute, ip-range-start | - | | | single: attribute; ip-range-start (network) | - | | | single: ip-range-start; network attribute | - | | | | - | | | If specified, Pacemaker will create an implicit | - | | | ``ocf:heartbeat:IPaddr2`` resource for each container | - | | | instance, starting with this IP address, using up to | - | | | ``replicas`` sequential addresses. These addresses can be | - | | | used from the host's network to reach the service inside | - | | | the container, though it is not visible within the | - | | | container itself. Only IPv4 addresses are currently | - | | | supported. | - +----------------+---------+------------------------------------------------------------+ - | host-netmask | 32 | .. index:: | - | | | single: network; attribute; host-netmask | - | | | single: attribute; host-netmask (network) | - | | | single: host-netmask; network attribute | - | | | | - | | | If ``ip-range-start`` is specified, the IP addresses | - | | | are created with this CIDR netmask (as a number of bits). | - +----------------+---------+------------------------------------------------------------+ - | host-interface | | .. index:: | - | | | single: network; attribute; host-interface | - | | | single: attribute; host-interface (network) | - | | | single: host-interface; network attribute | - | | | | - | | | If ``ip-range-start`` is specified, the IP addresses are | - | | | created on this host interface (by default, it will be | - | | | determined from the IP address). | - +----------------+---------+------------------------------------------------------------+ - | control-port | 3121 | .. index:: | - | | | single: network; attribute; control-port | - | | | single: attribute; control-port (network) | - | | | single: control-port; network attribute | - | | | | - | | | If the bundle contains a ``primitive``, the cluster will | - | | | use this integer TCP port for communication with | - | | | Pacemaker Remote inside the container. Changing this is | - | | | useful when the container is unable to listen on the | - | | | default port, for example, when the container uses the | - | | | host's network rather than ``ip-range-start`` (in which | - | | | case ``replicas-per-host`` must be 1), or when the bundle | - | | | may run on a Pacemaker Remote node that is already | - | | | listening on the default port. Any ``PCMK_remote_port`` | - | | | environment variable set on the host or in the container | - | | | is ignored for bundle connections. | - +----------------+---------+------------------------------------------------------------+ - -.. _s-resource-bundle-note-replica-names: - -.. note:: - - Replicas are named by the bundle id plus a dash and an integer counter starting - with zero. For example, if a bundle named **httpd-bundle** has **replicas=2**, its - containers will be named **httpd-bundle-0** and **httpd-bundle-1**. - -.. 
index:: - pair: XML element; port-mapping - -Additionally, a ``network`` element may optionally contain one or more -``port-mapping`` elements. - -.. table:: **Attributes of a port-mapping Element** - :widths: 2 1 5 - - +---------------+-------------------+------------------------------------------------------+ - | Attribute | Default | Description | - +===============+===================+======================================================+ - | id | | .. index:: | - | | | single: port-mapping; attribute, id | - | | | single: attribute; id (port-mapping) | - | | | single: id; port-mapping attribute | - | | | | - | | | A unique name for the port mapping (required) | - +---------------+-------------------+------------------------------------------------------+ - | port | | .. index:: | - | | | single: port-mapping; attribute, port | - | | | single: attribute; port (port-mapping) | - | | | single: port; port-mapping attribute | - | | | | - | | | If this is specified, connections to this TCP port | - | | | number on the host network (on the container's | - | | | assigned IP address, if ``ip-range-start`` is | - | | | specified) will be forwarded to the container | - | | | network. Exactly one of ``port`` or ``range`` | - | | | must be specified in a ``port-mapping``. | - +---------------+-------------------+------------------------------------------------------+ - | internal-port | value of ``port`` | .. index:: | - | | | single: port-mapping; attribute, internal-port | - | | | single: attribute; internal-port (port-mapping) | - | | | single: internal-port; port-mapping attribute | - | | | | - | | | If ``port`` and this are specified, connections | - | | | to ``port`` on the host's network will be | - | | | forwarded to this port on the container network. | - +---------------+-------------------+------------------------------------------------------+ - | range | | .. index:: | - | | | single: port-mapping; attribute, range | - | | | single: attribute; range (port-mapping) | - | | | single: range; port-mapping attribute | - | | | | - | | | If this is specified, connections to these TCP | - | | | port numbers (expressed as *first_port*-*last_port*) | - | | | on the host network (on the container's assigned IP | - | | | address, if ``ip-range-start`` is specified) will | - | | | be forwarded to the same ports in the container | - | | | network. Exactly one of ``port`` or ``range`` | - | | | must be specified in a ``port-mapping``. | - +---------------+-------------------+------------------------------------------------------+ - -.. note:: - - If the bundle contains a ``primitive``, Pacemaker will automatically map the - ``control-port``, so it is not necessary to specify that port in a - ``port-mapping``. - -.. index: - pair: XML element; storage - pair: XML element; storage-mapping - single: bundle; storage - -.. _s-bundle-storage: - -Bundle Storage Properties -_________________________ - -A bundle may optionally contain one ``storage`` element. A ``storage`` element -has no properties of its own, but may contain one or more ``storage-mapping`` -elements. - -.. table:: **Attributes of a storage-mapping Element** - :widths: 2 1 5 - - +-----------------+---------+-------------------------------------------------------------+ - | Attribute | Default | Description | - +=================+=========+=============================================================+ - | id | | .. 
index:: | - | | | single: storage-mapping; attribute, id | - | | | single: attribute; id (storage-mapping) | - | | | single: id; storage-mapping attribute | - | | | | - | | | A unique name for the storage mapping (required) | - +-----------------+---------+-------------------------------------------------------------+ - | source-dir | | .. index:: | - | | | single: storage-mapping; attribute, source-dir | - | | | single: attribute; source-dir (storage-mapping) | - | | | single: source-dir; storage-mapping attribute | - | | | | - | | | The absolute path on the host's filesystem that will be | - | | | mapped into the container. Exactly one of ``source-dir`` | - | | | and ``source-dir-root`` must be specified in a | - | | | ``storage-mapping``. | - +-----------------+---------+-------------------------------------------------------------+ - | source-dir-root | | .. index:: | - | | | single: storage-mapping; attribute, source-dir-root | - | | | single: attribute; source-dir-root (storage-mapping) | - | | | single: source-dir-root; storage-mapping attribute | - | | | | - | | | The start of a path on the host's filesystem that will | - | | | be mapped into the container, using a different | - | | | subdirectory on the host for each container instance. | - | | | The subdirectory will be named the same as the | - | | | :ref:`replica name `. | - | | | Exactly one of ``source-dir`` and ``source-dir-root`` | - | | | must be specified in a ``storage-mapping``. | - +-----------------+---------+-------------------------------------------------------------+ - | target-dir | | .. index:: | - | | | single: storage-mapping; attribute, target-dir | - | | | single: attribute; target-dir (storage-mapping) | - | | | single: target-dir; storage-mapping attribute | - | | | | - | | | The path name within the container where the host | - | | | storage will be mapped (required) | - +-----------------+---------+-------------------------------------------------------------+ - | options | | .. index:: | - | | | single: storage-mapping; attribute, options | - | | | single: attribute; options (storage-mapping) | - | | | single: options; storage-mapping attribute | - | | | | - | | | A comma-separated list of file system mount | - | | | options to use when mapping the storage | - +-----------------+---------+-------------------------------------------------------------+ - -.. note:: - - Pacemaker does not define the behavior if the source directory does not already - exist on the host. However, it is expected that the container technology and/or - its resource agent will create the source directory in that case. - -.. note:: - - If the bundle contains a ``primitive``, - Pacemaker will automatically map the equivalent of - ``source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey`` - and ``source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log`` into the - container, so it is not necessary to specify those paths in a - ``storage-mapping``. - -.. important:: - - The ``PCMK_authkey_location`` environment variable must not be set to anything - other than the default of ``/etc/pacemaker/authkey`` on any node in the cluster. - -.. important:: - - If SELinux is used in enforcing mode on the host, you must ensure the container - is allowed to use any storage you mount into it. For Docker and podman bundles, - adding "Z" to the mount options will create a container-specific label for the - mount that allows the container access. - -.. 
index:: - single: bundle; primitive - -Bundle Primitive -________________ - -A bundle may optionally contain one :ref:`primitive ` -resource. The primitive may have operations, instance attributes, and -meta-attributes defined, as usual. - -If a bundle contains a primitive resource, the container image must include -the Pacemaker Remote daemon, and at least one of ``ip-range-start`` or -``control-port`` must be configured in the bundle. Pacemaker will create an -implicit **ocf:pacemaker:remote** resource for the connection, launch -Pacemaker Remote within the container, and monitor and manage the primitive -resource via Pacemaker Remote. - -If the bundle has more than one container instance (replica), the primitive -resource will function as an implicit :ref:`clone ` -- a -:ref:`promotable clone ` if the bundle has ``promoted-max`` -greater than zero. - -.. note:: - - If you want to pass environment variables to a bundle's Pacemaker Remote - connection or primitive, you have two options: - - * Environment variables whose value is the same regardless of the underlying host - may be set using the container element's ``options`` attribute. - * If you want variables to have host-specific values, you can use the - :ref:`storage-mapping ` element to map a file on the host as - ``/etc/pacemaker/pcmk-init.env`` in the container *(since 2.0.3)*. - Pacemaker Remote will parse this file as a shell-like format, with - variables set as NAME=VALUE, ignoring blank lines and comments starting - with "#". - -.. important:: - - When a bundle has a ``primitive``, Pacemaker on all cluster nodes must be able to - contact Pacemaker Remote inside the bundle's containers. - - * The containers must have an accessible network (for example, ``network`` should - not be set to "none" with a ``primitive``). - * The default, using a distinct network space inside the container, works in - combination with ``ip-range-start``. Any firewall must allow access from all - cluster nodes to the ``control-port`` on the container IPs. - * If the container shares the host's network space (for example, by setting - ``network`` to "host"), a unique ``control-port`` should be specified for each - bundle. Any firewall must allow access from all cluster nodes to the - ``control-port`` on all cluster and remote node IPs. - -.. index:: - single: bundle; node attributes - -.. _s-bundle-attributes: - -Bundle Node Attributes -______________________ - -If the bundle has a ``primitive``, the primitive's resource agent may want to set -node attributes such as :ref:`promotion scores `. However, with -containers, it is not apparent which node should get the attribute. - -If the container uses shared storage that is the same no matter which node the -container is hosted on, then it is appropriate to use the promotion score on the -bundle node itself. - -On the other hand, if the container uses storage exported from the underlying host, -then it may be more appropriate to use the promotion score on the underlying host. - -Since this depends on the particular situation, the -``container-attribute-target`` resource meta-attribute allows the user to specify -which approach to use. If it is set to ``host``, then user-defined node attributes -will be checked on the underlying host. If it is anything else, the local node -(in this case the bundle node) is used as usual. - -This only applies to user-defined attributes; the cluster will always check the -local node for cluster-defined attributes such as ``#uname``. 
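A minimal sketch of the ``host`` approach is shown below; the bundle name and ``nvpair`` IDs are hypothetical, and the container, network, and primitive definitions are omitted for brevity:

.. topic:: Checking user-defined node attributes on the underlying host

   .. code-block:: xml

      <bundle id="httpd-bundle">
         <meta_attributes id="httpd-bundle-meta_attributes">
            <!-- With value="host", user-defined node attributes set by the
                 bundle's primitive (such as promotion scores) are checked on
                 the underlying cluster node rather than on the bundle node -->
            <nvpair id="httpd-bundle-attr-target"
                    name="container-attribute-target" value="host"/>
         </meta_attributes>
         <!-- docker/podman, network, and primitive elements omitted -->
      </bundle>
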
- -If ``container-attribute-target`` is ``host``, the cluster will pass additional -environment variables to the primitive's resource agent that allow it to set -node attributes appropriately: ``CRM_meta_container_attribute_target`` (identical -to the meta-attribute value) and ``CRM_meta_physical_host`` (the name of the -underlying host). - -.. note:: - - When called by a resource agent, the ``attrd_updater`` and ``crm_attribute`` - commands will automatically check those environment variables and set - attributes appropriately. - -.. index:: - single: bundle; meta-attributes - -Bundle Meta-Attributes -______________________ - -Any meta-attribute set on a bundle will be inherited by the bundle's -primitive and any resources implicitly created by Pacemaker for the bundle. - -This includes options such as ``priority``, ``target-role``, and ``is-managed``. See -:ref:`resource_options` for more information. - -Bundles support clone meta-attributes including ``notify``, ``ordered``, and -``interleave``. - -Limitations of Bundles -______________________ - -Restarting pacemaker while a bundle is unmanaged or the cluster is in -maintenance mode may cause the bundle to fail. - -Bundles may not be explicitly cloned or included in groups. This includes the -bundle's primitive and any resources implicitly created by Pacemaker for the -bundle. (If ``replicas`` is greater than 1, the bundle will behave like a clone -implicitly.) - -Bundles do not have instance attributes, utilization attributes, or operations, -though a bundle's primitive may have them. - -A bundle with a primitive can run on a Pacemaker Remote node only if the bundle -uses a distinct ``control-port``. - -.. [#] Of course, the service must support running multiple instances. - -.. [#] Docker is a trademark of Docker, Inc. No endorsement by or association with - Docker, Inc. is implied. diff --git a/doc/sphinx/Pacemaker_Explained/cluster-options.rst b/doc/sphinx/Pacemaker_Explained/cluster-options.rst new file mode 100644 index 0000000..77bd7e6 --- /dev/null +++ b/doc/sphinx/Pacemaker_Explained/cluster-options.rst @@ -0,0 +1,921 @@ +Cluster-Wide Configuration +-------------------------- + +.. index:: + pair: XML element; cib + pair: XML element; configuration + +Configuration Layout +#################### + +The cluster is defined by the Cluster Information Base (CIB), which uses XML +notation. The simplest CIB, an empty one, looks like this: + +.. topic:: An empty configuration + + .. code-block:: xml + + + + + + + + + + + +The empty configuration above contains the major sections that make up a CIB: + +* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain + fundamental settings are defined as attributes of this element. + + * ``configuration``: This section -- the primary focus of this document -- + contains traditional configuration information such as what resources the + cluster serves and the relationships among them. + + * ``crm_config``: cluster-wide configuration options + + * ``nodes``: the machines that host the cluster + + * ``resources``: the services run by the cluster + + * ``constraints``: indications of how resources should be placed + + * ``status``: This section contains the history of each resource on each + node. Based on this data, the cluster can construct the complete current + state of the cluster. The authoritative source for this section is the + local executor (pacemaker-execd process) on each cluster node, and the + cluster will occasionally repopulate the entire section. 
For this reason, + it is never written to disk, and administrators are advised against + modifying it in any way. + +In this document, configuration settings will be described as properties or +options based on how they are defined in the CIB: + +* Properties are XML attributes of an XML element. + +* Options are name-value pairs expressed as ``nvpair`` child elements of an XML + element. + +Normally, you will use command-line tools that abstract the XML, so the +distinction will be unimportant; both properties and options are cluster +settings you can tweak. + +Configuration Value Types +######################### + +Throughout this document, configuration values will be designated as having one +of the following types: + +.. list-table:: **Configuration Value Types** + :class: longtable + :widths: 1 3 + :header-rows: 1 + + * - Type + - Description + * - .. _boolean: + + .. index:: + pair: type; boolean + + boolean + - Case-insensitive text value where ``1``, ``yes``, ``y``, ``on``, + and ``true`` evaluate as true and ``0``, ``no``, ``n``, ``off``, + ``false``, and unset evaluate as false + * - .. _date_time: + + .. index:: + pair: type; date/time + + date/time + - Textual timestamp like ``Sat Dec 21 11:47:45 2013`` + * - .. _duration: + + .. index:: + pair: type; duration + + duration + - A time duration, specified either like a :ref:`timeout ` or an + `ISO 8601 duration `_. + A duration may be up to approximately 49 days but is intended for much + smaller time periods. + * - .. _enumeration: + + .. index:: + pair: type; enumeration + + enumeration + - Text that must be one of a set of defined values (which will be listed + in the description) + * - .. _integer: + + .. index:: + pair: type; integer + + integer + - 32-bit signed integer value (-2,147,483,648 to 2,147,483,647) + * - .. _nonnegative_integer: + + .. index:: + pair: type; nonnegative integer + + nonnegative integer + - 32-bit nonnegative integer value (0 to 2,147,483,647) + * - .. _port: + + .. index:: + pair: type; port + + port + - Integer TCP port number (0 to 65535) + * - .. _score: + + .. index:: + pair: type; score + + score + - A Pacemaker score can be an integer between -1,000,000 and 1,000,000, or + a string alias: ``INFINITY`` or ``+INFINITY`` is equivalent to + 1,000,000, ``-INFINITY`` is equivalent to -1,000,000, and ``red``, + ``yellow``, and ``green`` are equivalent to integers as described in + :ref:`node-health`. + * - .. _text: + + .. index:: + pair: type; text + + text + - A text string + * - .. _timeout: + + .. index:: + pair: type; timeout + + timeout + - A time duration, specified as a bare number (in which case it is + considered to be in seconds) or a number with a unit (``ms`` or ``msec`` + for milliseconds, ``us`` or ``usec`` for microseconds, ``s`` or ``sec`` + for seconds, ``m`` or ``min`` for minutes, ``h`` or ``hr`` for hours) + optionally with whitespace before and/or after the number. + * - .. _version: + + .. index:: + pair: type; version + + version + - Version number (any combination of alphanumeric characters, dots, and + dashes, starting with a number). + + +Scores +______ + +Scores are integral to how Pacemaker works. Practically everything from moving +a resource to deciding which resource to stop in a degraded cluster is achieved +by manipulating scores in some way. + +Scores are calculated per resource and node. Any node with a negative score for +a resource can't run that resource. The cluster places a resource on the node +with the highest score for it. 
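As a simple illustration (the resource and node names below are hypothetical, and explicit location constraints are only one of several sources of scores), the cluster compares each node's score for a resource and assigns the resource to the highest-scoring eligible node:

.. topic:: Hypothetical location scores driving placement

   .. code-block:: xml

      <constraints>
         <!-- WebServer scores 200 on node1 and 50 on node2; all else being
              equal, it is placed on node1 -->
         <rsc_location id="loc-WebServer-node1" rsc="WebServer" node="node1" score="200"/>
         <rsc_location id="loc-WebServer-node2" rsc="WebServer" node="node2" score="50"/>
         <!-- -INFINITY makes node3 ineligible to run WebServer at all -->
         <rsc_location id="loc-WebServer-node3" rsc="WebServer" node="node3" score="-INFINITY"/>
      </constraints>

Stickiness, failure handling, utilization, and other factors contribute to the same per-node scores, so constraint scores such as these are combined with them before placement is decided.
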
+ +Score addition and subtraction follow these rules: + +* Any value (including ``INFINITY``) - ``INFINITY`` = ``-INFINITY`` +* ``INFINITY`` + any value other than ``-INFINITY`` = ``INFINITY`` + +.. note:: + + What if you want to use a score higher than 1,000,000? Typically this possibility + arises when someone wants to base the score on some external metric that might + go above 1,000,000. + + The short answer is you can't. + + The long answer is it is sometimes possible to work around this limitation + creatively. You may be able to set the score to some computed value based on + the external metric rather than use the metric directly. For nodes, you can + store the metric as a node attribute, and query the attribute when computing + the score (possibly as part of a custom resource agent). + + +CIB Properties +############## + +Certain settings are defined by CIB properties (that is, attributes of the +``cib`` tag) rather than with the rest of the cluster configuration in the +``configuration`` section. + +The reason is simply a matter of parsing. These options are used by the +configuration database which is, by design, mostly ignorant of the content it +holds. So the decision was made to place them in an easy-to-find location. + +.. list-table:: **CIB Properties** + :class: longtable + :widths: 2 2 2 5 + :header-rows: 1 + + * - Name + - Type + - Default + - Description + * - .. _admin_epoch: + + .. index:: + pair: admin_epoch; cib + + admin_epoch + - :ref:`nonnegative integer ` + - 0 + - When a node joins the cluster, the cluster asks the node with the + highest (``admin_epoch``, ``epoch``, ``num_updates``) tuple to replace + the configuration on all the nodes -- which makes setting them correctly + very important. ``admin_epoch`` is never modified by the cluster; you + can use this to make the configurations on any inactive nodes obsolete. + * - .. _epoch: + + .. index:: + pair: epoch; cib + + epoch + - :ref:`nonnegative integer ` + - 0 + - The cluster increments this every time the CIB's configuration section + is updated. + * - .. _num_updates: + + .. index:: + pair: num_updates; cib + + num_updates + - :ref:`nonnegative integer ` + - 0 + - The cluster increments this every time the CIB's configuration or status + sections are updated, and resets it to 0 when epoch changes. + * - .. _validate_with: + + .. index:: + pair: validate-with; cib + + validate-with + - :ref:`enumeration ` + - + - Determines the type of XML validation that will be done on the + configuration. Allowed values are ``none`` (in which case the cluster + will not require that updates conform to expected syntax) and the base + names of schema files installed on the local machine (for example, + "pacemaker-3.9") + * - .. _remote_tls_port: + + .. index:: + pair: remote-tls-port; cib + + remote-tls-port + - :ref:`port ` + - + - If set, the CIB manager will listen for anonymously encrypted remote + connections on this port, to allow CIB administration from hosts not in + the cluster. No key is used, so this should be used only on a protected + network where man-in-the-middle attacks can be avoided. + * - .. _remote_clear_port: + + .. index:: + pair: remote-clear-port; cib + + remote-clear-port + - :ref:`port ` + - + - If set to a TCP port number, the CIB manager will listen for remote + connections on this port, to allow for CIB administration from hosts not + in the cluster. No encryption is used, so this should be used only on a + protected network. + * - .. _cib_last_written: + + .. 
index:: + pair: cib-last-written; cib + + cib-last-written + - :ref:`date/time ` + - + - Indicates when the configuration was last written to disk. Maintained by + the cluster; for informational purposes only. + * - .. _have_quorum: + + .. index:: + pair: have-quorum; cib + + have-quorum + - :ref:`boolean ` + - + - Indicates whether the cluster has quorum. If false, the cluster's + response is determined by ``no-quorum-policy`` (see below). Maintained + by the cluster. + * - .. _dc_uuid: + + .. index:: + pair: dc-uuid; cib + + dc-uuid + - :ref:`text ` + - + - Node ID of the cluster's current designated controller (DC). Used and + maintained by the cluster. + + +.. _cluster_options: + +Cluster Options +############### + +Cluster options, as you might expect, control how the cluster behaves when +confronted with various situations. + +They are grouped into sets within the ``crm_config`` section. In advanced +configurations, there may be more than one set. (This will be described later +in the chapter on :ref:`rules` where we will show how to have the cluster use +different sets of options during working hours than during weekends.) For now, +we will describe the simple case where each option is present at most once. + +You can obtain an up-to-date list of cluster options, including their default +values, by running the ``man pacemaker-schedulerd`` and +``man pacemaker-controld`` commands. + +.. list-table:: **Cluster Options** + :class: longtable + :widths: 2 2 2 5 + :header-rows: 1 + + * - Name + - Type + - Default + - Description + * - .. _cluster_name: + + .. index:: + pair: cluster option; cluster-name + + cluster-name + - :ref:`text ` + - + - An (optional) name for the cluster as a whole. This is mostly for users' + convenience for use as desired in administration, but can be used in the + Pacemaker configuration in :ref:`rules` (as the ``#cluster-name`` + :ref:`node attribute `). It may also + be used by higher-level tools when displaying cluster information, and + by certain resource agents (for example, the ``ocf:heartbeat:GFS2`` + agent stores the cluster name in filesystem meta-data). + * - .. _dc_version: + + .. index:: + pair: cluster option; dc-version + + dc-version + - :ref:`version ` + - *detected* + - Version of Pacemaker on the cluster's designated controller (DC). + Maintained by the cluster, and intended for diagnostic purposes. + * - .. _cluster_infrastructure: + + .. index:: + pair: cluster option; cluster-infrastructure + + cluster-infrastructure + - :ref:`text ` + - *detected* + - The messaging layer with which Pacemaker is currently running. + Maintained by the cluster, and intended for informational and diagnostic + purposes. + * - .. _no_quorum_policy: + + .. index:: + pair: cluster option; no-quorum-policy + + no-quorum-policy + - :ref:`enumeration ` + - stop + - What to do when the cluster does not have quorum. Allowed values: + + * ``ignore:`` continue all resource management + * ``freeze:`` continue resource management, but don't recover resources + from nodes not in the affected partition + * ``stop:`` stop all resources in the affected cluster partition + * ``demote:`` demote promotable resources and stop all other resources + in the affected cluster partition *(since 2.0.5)* + * ``suicide:`` fence all nodes in the affected cluster partition + * - .. _batch_limit: + + .. index:: + pair: cluster option; batch-limit + + batch-limit + - :ref:`integer ` + - 0 + - The maximum number of actions that the cluster may execute in parallel + across all nodes. 
The ideal value will depend on the speed and load + of your network and cluster nodes. If zero, the cluster will impose a + dynamically calculated limit only when any node has high load. If -1, + the cluster will not impose any limit. + * - .. _migration_limit: + + .. index:: + pair: cluster option; migration-limit + + migration-limit + - :ref:`integer ` + - -1 + - The number of :ref:`live migration ` actions that the + cluster is allowed to execute in parallel on a node. A value of -1 means + unlimited. + * - .. _symmetric_cluster: + + .. index:: + pair: cluster option; symmetric-cluster + + symmetric-cluster + - :ref:`boolean ` + - true + - If true, resources can run on any node by default. If false, a resource + is allowed to run on a node only if a + :ref:`location constraint ` enables it. + * - .. _stop_all_resources: + + .. index:: + pair: cluster option; stop-all-resources + + stop-all-resources + - :ref:`boolean ` + - false + - Whether all resources should be disallowed from running (can be useful + during maintenance or troubleshooting) + * - .. _stop_orphan_resources: + + .. index:: + pair: cluster option; stop-orphan-resources + + stop-orphan-resources + - :ref:`boolean ` + - true + - Whether resources that have been deleted from the configuration should + be stopped. This value takes precedence over + :ref:`is-managed ` (that is, even unmanaged resources will + be stopped when orphaned if this value is ``true``). + * - .. _stop_orphan_actions: + + .. index:: + pair: cluster option; stop-orphan-actions + + stop-orphan-actions + - :ref:`boolean ` + - true + - Whether recurring :ref:`operations ` that have been deleted + from the configuration should be cancelled + * - .. _start_failure_is_fatal: + + .. index:: + pair: cluster option; start-failure-is-fatal + + start-failure-is-fatal + - :ref:`boolean ` + - true + - Whether a failure to start a resource on a particular node prevents + further start attempts on that node. If ``false``, the cluster will + decide whether the node is still eligible based on the resource's + current failure count and ``migration-threshold``. + * - .. _enable_startup_probes: + + .. index:: + pair: cluster option; enable-startup-probes + + enable-startup-probes + - :ref:`boolean ` + - true + - Whether the cluster should check the pre-existing state of resources + when the cluster starts + * - .. _maintenance_mode: + + .. index:: + pair: cluster option; maintenance-mode + + maintenance-mode + - :ref:`boolean ` + - false + - If true, the cluster will not start or stop any resource in the cluster, + and any recurring operations (except those specifying ``role`` as + ``Stopped``) will be paused. If true, this overrides the + :ref:`maintenance ` node attribute, + :ref:`is-managed ` and :ref:`maintenance ` + resource meta-attributes, and :ref:`enabled ` operation + meta-attribute. + * - .. _stonith_enabled: + + .. index:: + pair: cluster option; stonith-enabled + + stonith-enabled + - :ref:`boolean ` + - true + - Whether the cluster is allowed to fence nodes (for example, failed nodes + and nodes with resources that can't be stopped). + + If true, at least one fence device must be configured before resources + are allowed to run. + + If false, unresponsive nodes are immediately assumed to be running no + resources, and resource recovery on online nodes starts without any + further protection (which can mean *data loss* if the unresponsive node + still accesses shared storage, for example). See also the + :ref:`requires ` resource meta-attribute. + * - .. 
_stonith_action: + + .. index:: + pair: cluster option; stonith-action + + stonith-action + - :ref:`enumeration ` + - reboot + - Action the cluster should send to the fence agent when a node must be + fenced. Allowed values are ``reboot``, ``off``, and (for legacy agents + only) ``poweroff``. + * - .. _stonith_timeout: + + .. index:: + pair: cluster option; stonith-timeout + + stonith-timeout + - :ref:`duration ` + - 60s + - How long to wait for ``on``, ``off``, and ``reboot`` fence actions to + complete by default. + * - .. _stonith_max_attempts: + + .. index:: + pair: cluster option; stonith-max-attempts + + stonith-max-attempts + - :ref:`score ` + - 10 + - How many times fencing can fail for a target before the cluster will no + longer immediately re-attempt it. Any value below 1 will be ignored, and + the default will be used instead. + * - .. _stonith_watchdog_timeout: + + .. index:: + pair: cluster option; stonith-watchdog-timeout + + stonith-watchdog-timeout + - :ref:`timeout ` + - 0 + - If nonzero, and the cluster detects ``have-watchdog`` as ``true``, then + watchdog-based self-fencing will be performed via SBD when fencing is + required, without requiring a fencing resource explicitly configured. + + If this is set to a positive value, unseen nodes are assumed to + self-fence within this much time. + + **Warning:** It must be ensured that this value is larger than the + ``SBD_WATCHDOG_TIMEOUT`` environment variable on all nodes. Pacemaker + verifies the settings individually on all nodes and prevents startup or + shuts down if configured wrongly on the fly. It is strongly recommended + that ``SBD_WATCHDOG_TIMEOUT`` be set to the same value on all nodes. + + If this is set to a negative value, and ``SBD_WATCHDOG_TIMEOUT`` is set, + twice that value will be used. + + **Warning:** In this case, it is essential (and currently not verified + by pacemaker) that ``SBD_WATCHDOG_TIMEOUT`` is set to the same value on + all nodes. + * - .. _concurrent-fencing: + + .. index:: + pair: cluster option; concurrent-fencing + + concurrent-fencing + - :ref:`boolean ` + - false + - Whether the cluster is allowed to initiate multiple fence actions + concurrently. Fence actions initiated externally, such as via the + ``stonith_admin`` tool or an application such as DLM, or by the fencer + itself such as recurring device monitors and ``status`` and ``list`` + commands, are not limited by this option. + * - .. _fence_reaction: + + .. index:: + pair: cluster option; fence-reaction + + fence-reaction + - :ref:`enumeration ` + - stop + - How should a cluster node react if notified of its own fencing? A + cluster node may receive notification of its own fencing if fencing is + misconfigured, or if fabric fencing is in use that doesn't cut cluster + communication. Allowed values are ``stop`` to attempt to immediately + stop Pacemaker and stay stopped, or ``panic`` to attempt to immediately + reboot the local node, falling back to stop on failure. The default is + likely to be changed to ``panic`` in a future release. *(since 2.0.3)* + * - .. _priority_fencing_delay: + + .. index:: + pair: cluster option; priority-fencing-delay + + priority-fencing-delay + - :ref:`duration ` + - 0 + - Apply this delay to any fencing targeting the lost nodes with the + highest total resource priority in case we don't have the majority of + the nodes in our cluster partition, so that the more significant nodes + potentially win any fencing match (especially meaningful in a + split-brain of a 2-node cluster). 
A promoted resource instance takes the + resource's priority plus 1 if the resource's priority is not 0. Any + static or random delays introduced by ``pcmk_delay_base`` and + ``pcmk_delay_max`` configured for the corresponding fencing resources + will be added to this delay. This delay should be significantly greater + than (safely twice) the maximum delay from those parameters. *(since + 2.0.4)* + * - .. _node_pending_timeout: + + .. index:: + pair: cluster option; node-pending-timeout + + node-pending-timeout + - :ref:`duration ` + - 0 + - Fence nodes that do not join the controller process group within this + much time after joining the cluster, to allow the cluster to continue + managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + *(since 2.1.7)* + * - .. _cluster_delay: + + .. index:: + pair: cluster option; cluster-delay + + cluster-delay + - :ref:`duration ` + - 60s + - If the DC requires an action to be executed on another node, it will + consider the action failed if it does not get a response from the other + node within this time (beyond the action's own timeout). The ideal value + will depend on the speed and load of your network and cluster nodes. + * - .. _dc_deadtime: + + .. index:: + pair: cluster option; dc-deadtime + + dc-deadtime + - :ref:`duration ` + - 20s + - How long to wait for a response from other nodes when electing a DC. The + ideal value will depend on the speed and load of your network and + cluster nodes. + * - .. _cluster_ipc_limit: + + .. index:: + pair: cluster option; cluster-ipc-limit + + cluster-ipc-limit + - :ref:`nonnegative integer ` + - 500 + - The maximum IPC message backlog before one cluster daemon will + disconnect another. This is of use in large clusters, for which a good + value is the number of resources in the cluster multiplied by the number + of nodes. The default of 500 is also the minimum. Raise this if you see + "Evicting client" log messages for cluster daemon process IDs. + * - .. _pe_error_series_max: + + .. index:: + pair: cluster option; pe-error-series-max + + pe-error-series-max + - :ref:`integer ` + - -1 + - The number of scheduler inputs resulting in errors to save. These inputs + can be helpful during troubleshooting and when reporting issues. A + negative value means save all inputs, and 0 means save none. + * - .. _pe_warn_series_max: + + .. index:: + pair: cluster option; pe-warn-series-max + + pe-warn-series-max + - :ref:`integer ` + - 5000 + - The number of scheduler inputs resulting in warnings to save. These + inputs can be helpful during troubleshooting and when reporting issues. + A negative value means save all inputs, and 0 means save none. + * - .. _pe_input_series_max: + + .. index:: + pair: cluster option; pe-input-series-max + + pe-input-series-max + - :ref:`integer ` + - 4000 + - The number of "normal" scheduler inputs to save. These inputs can be + helpful during troubleshooting and when reporting issues. A negative + value means save all inputs, and 0 means save none. + * - .. _enable_acl: + + .. index:: + pair: cluster option; enable-acl + + enable-acl + - :ref:`boolean ` + - false + - Whether :ref:`access control lists ` should be used to authorize + CIB modifications + * - .. _placement_strategy: + + .. index:: + pair: cluster option; placement-strategy + + placement-strategy + - :ref:`enumeration ` + - default + - How the cluster should assign resources to nodes (see + :ref:`utilization`). 
Allowed values are ``default``, ``utilization``, + ``balanced``, and ``minimal``. + * - .. _node_health_strategy: + + .. index:: + pair: cluster option; node-health-strategy + + node-health-strategy + - :ref:`enumeration ` + - none + - How the cluster should react to :ref:`node health ` + attributes. Allowed values are ``none``, ``migrate-on-red``, + ``only-green``, ``progressive``, and ``custom``. + * - .. _node_health_base: + + .. index:: + pair: cluster option; node-health-base + + node-health-base + - :ref:`score ` + - 0 + - The base health score assigned to a node. Only used when + ``node-health-strategy`` is ``progressive``. + * - .. _node_health_green: + + .. index:: + pair: cluster option; node-health-green + + node-health-green + - :ref:`score ` + - 0 + - The score to use for a node health attribute whose value is ``green``. + Only used when ``node-health-strategy`` is ``progressive`` or + ``custom``. + * - .. _node_health_yellow: + + .. index:: + pair: cluster option; node-health-yellow + + node-health-yellow + - :ref:`score ` + - 0 + - The score to use for a node health attribute whose value is ``yellow``. + Only used when ``node-health-strategy`` is ``progressive`` or + ``custom``. + * - .. _node_health_red: + + .. index:: + pair: cluster option; node-health-red + + node-health-red + - :ref:`score ` + - 0 + - The score to use for a node health attribute whose value is ``red``. + Only used when ``node-health-strategy`` is ``progressive`` or + ``custom``. + * - .. _cluster_recheck_interval: + + .. index:: + pair: cluster option; cluster-recheck-interval + + cluster-recheck-interval + - :ref:`duration ` + - 15min + - Pacemaker is primarily event-driven, and looks ahead to know when to + recheck the cluster for failure timeouts and most time-based rules + *(since 2.0.3)*. However, it will also recheck the cluster after this + amount of inactivity. This has two goals: rules with ``date_spec`` are + only guaranteed to be checked this often, and it also serves as a + fail-safe for some kinds of scheduler bugs. A value of 0 disables this + polling. + * - .. _shutdown_lock: + + .. index:: + pair: cluster option; shutdown-lock + + shutdown-lock + - :ref:`boolean ` + - false + - The default of false allows active resources to be recovered elsewhere + when their node is cleanly shut down, which is what the vast majority of + users will want. However, some users prefer to make resources highly + available only for failures, with no recovery for clean shutdowns. If + this option is true, resources active on a node when it is cleanly shut + down are kept "locked" to that node (not allowed to run elsewhere) until + they start again on that node after it rejoins (or for at most + ``shutdown-lock-limit``, if set). Stonith resources and Pacemaker Remote + connections are never locked. Clone and bundle instances and the + promoted role of promotable clones are currently never locked, though + support could be added in a future release. Locks may be manually + cleared using the ``--refresh`` option of ``crm_resource`` (both the + resource and node must be specified; this works with remote nodes if + their connection resource's ``target-role`` is set to ``Stopped``, but + not if Pacemaker Remote is stopped on the remote node without disabling + the connection resource). *(since 2.0.4)* + * - .. _shutdown_lock_limit: + + .. 
index:: + pair: cluster option; shutdown-lock-limit + + shutdown-lock-limit + - :ref:`duration ` + - 0 + - If ``shutdown-lock`` is true, and this is set to a nonzero time + duration, locked resources will be allowed to start after this much time + has passed since the node shutdown was initiated, even if the node has + not rejoined. (This works with remote nodes only if their connection + resource's ``target-role`` is set to ``Stopped``.) *(since 2.0.4)* + * - .. _remove_after_stop: + + .. index:: + pair: cluster option; remove-after-stop + + remove-after-stop + - :ref:`boolean ` + - false + - *Deprecated* Whether the cluster should remove resources from + Pacemaker's executor after they are stopped. Values other than the + default are, at best, poorly tested and potentially dangerous. This + option is deprecated and will be removed in a future release. + * - .. _startup_fencing: + + .. index:: + pair: cluster option; startup-fencing + + startup-fencing + - :ref:`boolean ` + - true + - *Advanced Use Only:* Whether the cluster should fence unseen nodes at + start-up. Setting this to false is unsafe, because the unseen nodes + could be active and running resources but unreachable. ``dc-deadtime`` + acts as a grace period before this fencing, since a DC must be elected + to schedule fencing. + * - .. _election_timeout: + + .. index:: + pair: cluster option; election-timeout + + election-timeout + - :ref:`duration ` + - 2min + - *Advanced Use Only:* If a winner is not declared within this much time + of starting an election, the node that initiated the election will + declare itself the winner. + * - .. _shutdown_escalation: + + .. index:: + pair: cluster option; shutdown-escalation + + shutdown-escalation + - :ref:`duration ` + - 20min + - *Advanced Use Only:* The controller will exit immediately if a shutdown + does not complete within this much time. + * - .. _join_integration_timeout: + + .. index:: + pair: cluster option; join-integration-timeout + + join-integration-timeout + - :ref:`duration ` + - 3min + - *Advanced Use Only:* If you need to adjust this value, it probably + indicates the presence of a bug. + * - .. _join_finalization_timeout: + + .. index:: + pair: cluster option; join-finalization-timeout + + join-finalization-timeout + - :ref:`duration ` + - 30min + - *Advanced Use Only:* If you need to adjust this value, it probably + indicates the presence of a bug. + * - .. _transition_delay: + + .. index:: + pair: cluster option; transition-delay + + transition-delay + - :ref:`duration ` + - 0s + - *Advanced Use Only:* Delay cluster recovery for the configured interval + to allow for additional or related events to occur. This can be useful + if your configuration is sensitive to the order in which ping updates + arrive. Enabling this option will slow down cluster recovery under all + conditions. diff --git a/doc/sphinx/Pacemaker_Explained/collective.rst b/doc/sphinx/Pacemaker_Explained/collective.rst new file mode 100644 index 0000000..a4fa9dc --- /dev/null +++ b/doc/sphinx/Pacemaker_Explained/collective.rst @@ -0,0 +1,1637 @@ +.. index: + single: collective resource + single: resource; collective + +Collective Resources +-------------------- + +Pacemaker supports several types of *collective* resources, which consist of +multiple, related resource instances. + + +.. index: + single: group resource + single: resource; group + +.. 
_group-resources: + +Groups - A Syntactic Shortcut +############################# + +One of the most common elements of a cluster is a set of resources +that need to be located together, start sequentially, and stop in the +reverse order. To simplify this configuration, we support the concept +of groups. + +.. topic:: A group of two primitive resources + + .. code-block:: xml + + + + + + + + + + +Although the example above contains only two resources, there is no +limit to the number of resources a group can contain. The example is +also sufficient to explain the fundamental properties of a group: + +* Resources are started in the order they appear in (**Public-IP** first, + then **Email**) +* Resources are stopped in the reverse order to which they appear in + (**Email** first, then **Public-IP**) + +If a resource in the group can't run anywhere, then nothing after that +is allowed to run, too. + +* If **Public-IP** can't run anywhere, neither can **Email**; +* but if **Email** can't run anywhere, this does not affect **Public-IP** + in any way + +The group above is logically equivalent to writing: + +.. topic:: How the cluster sees a group resource + + .. code-block:: xml + + + + + + + + + + + + + + + + +Obviously as the group grows bigger, the reduced configuration effort +can become significant. + +Another (typical) example of a group is a DRBD volume, the filesystem +mount, an IP address, and an application that uses them. + +.. index:: + pair: XML element; group + +Group Properties +________________ + +.. table:: **Properties of a Group Resource** + :widths: 1 4 + + +-------------+------------------------------------------------------------------+ + | Field | Description | + +=============+==================================================================+ + | id | .. index:: | + | | single: group; property, id | + | | single: property; id (group) | + | | single: id; group property | + | | | + | | A unique name for the group | + +-------------+------------------------------------------------------------------+ + | description | .. index:: | + | | single: group; attribute, description | + | | single: attribute; description (group) | + | | single: description; group attribute | + | | | + | | An optional description of the group, for the user's own | + | | purposes. | + | | E.g. ``resources needed for website`` | + +-------------+------------------------------------------------------------------+ + +Group Options +_____________ + +Groups inherit the ``priority``, ``target-role``, and ``is-managed`` properties +from primitive resources. See :ref:`resource_options` for information about +those properties. + +Group Instance Attributes +_________________________ + +Groups have no instance attributes. However, any that are set for the group +object will be inherited by the group's children. + +Group Contents +______________ + +Groups may only contain a collection of cluster resources (see +:ref:`primitive-resource`). To refer to a child of a group resource, just use +the child's ``id`` instead of the group's. + +Group Constraints +_________________ + +Although it is possible to reference a group's children in +constraints, it is usually preferable to reference the group itself. + +.. topic:: Some constraints involving groups + + .. code-block:: xml + + + + + + + +.. index:: + pair: resource-stickiness; group + +Group Stickiness +________________ + +Stickiness, the measure of how much a resource wants to stay where it +is, is additive in groups. 
Every active resource of the group will +contribute its stickiness value to the group's total. So if the +default ``resource-stickiness`` is 100, and a group has seven members, +five of which are active, then the group as a whole will prefer its +current location with a score of 500. + +.. index:: + single: clone + single: resource; clone + +.. _s-resource-clone: + +Clones - Resources That Can Have Multiple Active Instances +########################################################## + +*Clone* resources are resources that can have more than one copy active at the +same time. This allows you, for example, to run a copy of a daemon on every +node. You can clone any primitive or group resource [#]_. + +Anonymous versus Unique Clones +______________________________ + +A clone resource is configured to be either *anonymous* or *globally unique*. + +Anonymous clones are the simplest. These behave completely identically +everywhere they are running. Because of this, there can be only one instance of +an anonymous clone active per node. + +The instances of globally unique clones are distinct entities. All instances +are launched identically, but one instance of the clone is not identical to any +other instance, whether running on the same node or a different node. As an +example, a cloned IP address can use special kernel functionality such that +each instance handles a subset of requests for the same IP address. + +.. index:: + single: promotable clone + single: resource; promotable + +.. _s-resource-promotable: + +Promotable clones +_________________ + +If a clone is *promotable*, its instances can perform a special role that +Pacemaker will manage via the ``promote`` and ``demote`` actions of the resource +agent. + +Services that support such a special role have various terms for the special +role and the default role: primary and secondary, master and replica, +controller and worker, etc. Pacemaker uses the terms *promoted* and +*unpromoted* to be agnostic to what the service calls them or what they do. + +All that Pacemaker cares about is that an instance comes up in the unpromoted role +when started, and the resource agent supports the ``promote`` and ``demote`` actions +to manage entering and exiting the promoted role. + +.. index:: + pair: XML element; clone + +Clone Properties +________________ + +.. table:: **Properties of a Clone Resource** + :widths: 1 4 + + +-------------+------------------------------------------------------------------+ + | Field | Description | + +=============+==================================================================+ + | id | .. index:: | + | | single: clone; property, id | + | | single: property; id (clone) | + | | single: id; clone property | + | | | + | | A unique name for the clone | + +-------------+------------------------------------------------------------------+ + | description | .. index:: | + | | single: clone; attribute, description | + | | single: attribute; description (clone) | + | | single: description; clone attribute | + | | | + | | An optional description of the clone, for the user's own | + | | purposes. | + | | E.g. ``IP address for website`` | + +-------------+------------------------------------------------------------------+ + +.. index:: + pair: options; clone + +Clone Options +_____________ + +:ref:`Options ` inherited from primitive resources: +``priority, target-role, is-managed`` + +.. 
table:: **Clone-specific configuration options** + :class: longtable + :widths: 1 1 3 + + +-------------------+-----------------+-------------------------------------------------------+ + | Field | Default | Description | + +===================+=================+=======================================================+ + | globally-unique | false | .. index:: | + | | | single: clone; option, globally-unique | + | | | single: option; globally-unique (clone) | + | | | single: globally-unique; clone option | + | | | | + | | | If **true**, each clone instance performs a | + | | | distinct function | + +-------------------+-----------------+-------------------------------------------------------+ + | clone-max | 0 | .. index:: | + | | | single: clone; option, clone-max | + | | | single: option; clone-max (clone) | + | | | single: clone-max; clone option | + | | | | + | | | The maximum number of clone instances that can | + | | | be started across the entire cluster. If 0, the | + | | | number of nodes in the cluster will be used. | + +-------------------+-----------------+-------------------------------------------------------+ + | clone-node-max | 1 | .. index:: | + | | | single: clone; option, clone-node-max | + | | | single: option; clone-node-max (clone) | + | | | single: clone-node-max; clone option | + | | | | + | | | If ``globally-unique`` is **true**, the maximum | + | | | number of clone instances that can be started | + | | | on a single node | + +-------------------+-----------------+-------------------------------------------------------+ + | clone-min | 0 | .. index:: | + | | | single: clone; option, clone-min | + | | | single: option; clone-min (clone) | + | | | single: clone-min; clone option | + | | | | + | | | Require at least this number of clone instances | + | | | to be runnable before allowing resources | + | | | depending on the clone to be runnable. A value | + | | | of 0 means require all clone instances to be | + | | | runnable. | + +-------------------+-----------------+-------------------------------------------------------+ + | notify | false | .. index:: | + | | | single: clone; option, notify | + | | | single: option; notify (clone) | + | | | single: notify; clone option | + | | | | + | | | Call the resource agent's **notify** action for | + | | | all active instances, before and after starting | + | | | or stopping any clone instance. The resource | + | | | agent must support this action. | + | | | Allowed values: **false**, **true** | + +-------------------+-----------------+-------------------------------------------------------+ + | ordered | false | .. index:: | + | | | single: clone; option, ordered | + | | | single: option; ordered (clone) | + | | | single: ordered; clone option | + | | | | + | | | If **true**, clone instances must be started | + | | | sequentially instead of in parallel. | + | | | Allowed values: **false**, **true** | + +-------------------+-----------------+-------------------------------------------------------+ + | interleave | false | .. index:: | + | | | single: clone; option, interleave | + | | | single: option; interleave (clone) | + | | | single: interleave; clone option | + | | | | + | | | When this clone is ordered relative to another | + | | | clone, if this option is **false** (the default), | + | | | the ordering is relative to *all* instances of | + | | | the other clone, whereas if this option is | + | | | **true**, the ordering is relative only to | + | | | instances on the same node. 
| + | | | Allowed values: **false**, **true** | + +-------------------+-----------------+-------------------------------------------------------+ + | promotable | false | .. index:: | + | | | single: clone; option, promotable | + | | | single: option; promotable (clone) | + | | | single: promotable; clone option | + | | | | + | | | If **true**, clone instances can perform a | + | | | special role that Pacemaker will manage via the | + | | | resource agent's **promote** and **demote** | + | | | actions. The resource agent must support these | + | | | actions. | + | | | Allowed values: **false**, **true** | + +-------------------+-----------------+-------------------------------------------------------+ + | promoted-max | 1 | .. index:: | + | | | single: clone; option, promoted-max | + | | | single: option; promoted-max (clone) | + | | | single: promoted-max; clone option | + | | | | + | | | If ``promotable`` is **true**, the number of | + | | | instances that can be promoted at one time | + | | | across the entire cluster | + +-------------------+-----------------+-------------------------------------------------------+ + | promoted-node-max | 1 | .. index:: | + | | | single: clone; option, promoted-node-max | + | | | single: option; promoted-node-max (clone) | + | | | single: promoted-node-max; clone option | + | | | | + | | | If ``promotable`` is **true** and ``globally-unique`` | + | | | is **false**, the number of clone instances can be | + | | | promoted at one time on a single node | + +-------------------+-----------------+-------------------------------------------------------+ + +.. note:: **Deprecated Terminology** + + In older documentation and online examples, you may see promotable clones + referred to as *multi-state*, *stateful*, or *master/slave*; these mean the + same thing as *promotable*. Certain syntax is supported for backward + compatibility, but is deprecated and will be removed in a future version: + + * Using a ``master`` tag, instead of a ``clone`` tag with the ``promotable`` + meta-attribute set to ``true`` + * Using the ``master-max`` meta-attribute instead of ``promoted-max`` + * Using the ``master-node-max`` meta-attribute instead of + ``promoted-node-max`` + * Using ``Master`` as a role name instead of ``Promoted`` + * Using ``Slave`` as a role name instead of ``Unpromoted`` + + +Clone Contents +______________ + +Clones must contain exactly one primitive or group resource. + +.. topic:: A clone that runs a web server on all nodes + + .. code-block:: xml + + + + + + + + + +.. warning:: + + You should never reference the name of a clone's child (the primitive or group + resource being cloned). If you think you need to do this, you probably need to + re-evaluate your design. + +Clone Instance Attribute +________________________ + +Clones have no instance attributes; however, any that are set here will be +inherited by the clone's child. + +.. index:: + single: clone; constraint + +Clone Constraints +_________________ + +In most cases, a clone will have a single instance on each active cluster +node. If this is not the case, you can indicate which nodes the +cluster should preferentially assign copies to with resource location +constraints. These constraints are written no differently from those +for primitive resources except that the clone's **id** is used. + +.. topic:: Some constraints involving clones + + .. code-block:: xml + + + + + + + +Ordering constraints behave slightly differently for clones. 
In the +example above, ``apache-stats`` will wait until all copies of ``apache-clone`` +that need to be started have done so before being started itself. +Only if *no* copies can be started will ``apache-stats`` be prevented +from being active. Additionally, the clone will wait for +``apache-stats`` to be stopped before stopping itself. + +Colocation of a primitive or group resource with a clone means that +the resource can run on any node with an active instance of the clone. +The cluster will choose an instance based on where the clone is running and +the resource's own location preferences. + +Colocation between clones is also possible. If one clone **A** is colocated +with another clone **B**, the set of allowed locations for **A** is limited to +nodes on which **B** is (or will be) active. Placement is then performed +normally. + +.. index:: + single: promotable clone; constraint + +.. _promotable-clone-constraints: + +Promotable Clone Constraints +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For promotable clone resources, the ``first-action`` and/or ``then-action`` fields +for ordering constraints may be set to ``promote`` or ``demote`` to constrain the +promoted role, and colocation constraints may contain ``rsc-role`` and/or +``with-rsc-role`` fields. + +.. topic:: Constraints involving promotable clone resources + + .. code-block:: xml + + + + + + + + + +In the example above, **myApp** will wait until one of the database +copies has been started and promoted before being started +itself on the same node. Only if no copies can be promoted will **myApp** be +prevented from being active. Additionally, the cluster will wait for +**myApp** to be stopped before demoting the database. + +Colocation of a primitive or group resource with a promotable clone +resource means that it can run on any node with an active instance of +the promotable clone resource that has the specified role (``Promoted`` or +``Unpromoted``). In the example above, the cluster will choose a location +based on where database is running in the promoted role, and if there are +multiple promoted instances it will also factor in **myApp**'s own location +preferences when deciding which location to choose. + +Colocation with regular clones and other promotable clone resources is also +possible. In such cases, the set of allowed locations for the **rsc** +clone is (after role filtering) limited to nodes on which the +``with-rsc`` promotable clone resource is (or will be) in the specified role. +Placement is then performed as normal. + +Using Promotable Clone Resources in Colocation Sets +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When a promotable clone is used in a :ref:`resource set ` +inside a colocation constraint, the resource set may take a ``role`` attribute. + +In the following example, an instance of **B** may be promoted only on a node +where **A** is in the promoted role. Additionally, resources **C** and **D** +must be located on a node where both **A** and **B** are promoted. + +.. topic:: Colocate C and D with A's and B's promoted instances + + .. code-block:: xml + + + + + + + + + + + + + + +Using Promotable Clone Resources in Ordered Sets +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When a promotable clone is used in a :ref:`resource set ` +inside an ordering constraint, the resource set may take an ``action`` +attribute. + +.. topic:: Start C and D after first promoting A and B + + .. 
code-block:: xml + + + + + + + + + + + + + + +In the above example, **B** cannot be promoted until **A** has been promoted. +Additionally, resources **C** and **D** must wait until **A** and **B** have +been promoted before they can start. + +.. index:: + pair: resource-stickiness; clone + +.. _s-clone-stickiness: + +Clone Stickiness +________________ + +To achieve stable assignments, clones are slightly sticky by default. If no +value for ``resource-stickiness`` is provided, the clone will use a value of 1. +Being a small value, it causes minimal disturbance to the score calculations of +other resources but is enough to prevent Pacemaker from needlessly moving +instances around the cluster. + +.. note:: + + For globally unique clones, this may result in multiple instances of the + clone staying on a single node, even after another eligible node becomes + active (for example, after being put into standby mode then made active again). + If you do not want this behavior, specify a ``resource-stickiness`` of 0 + for the clone temporarily and let the cluster adjust, then set it back + to 1 if you want the default behavior to apply again. + +.. important:: + + If ``resource-stickiness`` is set in the ``rsc_defaults`` section, it will + apply to clone instances as well. This means an explicit ``resource-stickiness`` + of 0 in ``rsc_defaults`` works differently from the implicit default used when + ``resource-stickiness`` is not specified. + +Clone Resource Agent Requirements +_________________________________ + +Any resource can be used as an anonymous clone, as it requires no +additional support from the resource agent. Whether it makes sense to +do so depends on your resource and its resource agent. + +Resource Agent Requirements for Globally Unique Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Globally unique clones require additional support in the resource agent. In +particular, it must only respond with ``${OCF_SUCCESS}`` if the node has that +exact instance active. All other probes for instances of the clone should +result in ``${OCF_NOT_RUNNING}`` (or one of the other OCF error codes if +they are failed). + +Individual instances of a clone are identified by appending a colon and a +numerical offset, e.g. **apache:2**. + +Resource agents can find out how many copies there are by examining +the ``OCF_RESKEY_CRM_meta_clone_max`` environment variable and which +instance it is by examining ``OCF_RESKEY_CRM_meta_clone``. + +The resource agent must not make any assumptions (based on +``OCF_RESKEY_CRM_meta_clone``) about which numerical instances are active. In +particular, the list of active copies will not always be an unbroken +sequence, nor always start at 0. + +Resource Agent Requirements for Promotable Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Promotable clone resources require two extra actions, ``demote`` and ``promote``, +which are responsible for changing the state of the resource. Like **start** and +**stop**, they should return ``${OCF_SUCCESS}`` if they completed successfully or +a relevant error code if they did not. + +The states can mean whatever you wish, but when the resource is +started, it must come up in the unpromoted role. From there, the +cluster will decide which instances to promote. + +In addition to the clone requirements for monitor actions, agents must +also *accurately* report which state they are in. 
The cluster relies +on the agent to report its status (including role) accurately and does +not indicate to the agent what role it currently believes it to be in. + +.. table:: **Role implications of OCF return codes** + :widths: 1 3 + + +----------------------+--------------------------------------------------+ + | Monitor Return Code | Description | + +======================+==================================================+ + | OCF_NOT_RUNNING | .. index:: | + | | single: OCF_NOT_RUNNING | + | | single: OCF return code; OCF_NOT_RUNNING | + | | | + | | Stopped | + +----------------------+--------------------------------------------------+ + | OCF_SUCCESS | .. index:: | + | | single: OCF_SUCCESS | + | | single: OCF return code; OCF_SUCCESS | + | | | + | | Running (Unpromoted) | + +----------------------+--------------------------------------------------+ + | OCF_RUNNING_PROMOTED | .. index:: | + | | single: OCF_RUNNING_PROMOTED | + | | single: OCF return code; OCF_RUNNING_PROMOTED | + | | | + | | Running (Promoted) | + +----------------------+--------------------------------------------------+ + | OCF_FAILED_PROMOTED | .. index:: | + | | single: OCF_FAILED_PROMOTED | + | | single: OCF return code; OCF_FAILED_PROMOTED | + | | | + | | Failed (Promoted) | + +----------------------+--------------------------------------------------+ + | Other | .. index:: | + | | single: return code | + | | | + | | Failed (Unpromoted) | + +----------------------+--------------------------------------------------+ + +Clone Notifications +~~~~~~~~~~~~~~~~~~~ + +If the clone has the ``notify`` meta-attribute set to **true**, and the resource +agent supports the ``notify`` action, Pacemaker will call the action when +appropriate, passing a number of extra variables which, when combined with +additional context, can be used to calculate the current state of the cluster +and what is about to happen to it. + +.. index:: + single: clone; environment variables + single: notify; environment variables + +.. table:: **Environment variables supplied with Clone notify actions** + :widths: 1 1 + + +----------------------------------------------+-------------------------------------------------------------------------------+ + | Variable | Description | + +==============================================+===============================================================================+ + | OCF_RESKEY_CRM_meta_notify_type | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_type | + | | single: OCF_RESKEY_CRM_meta_notify_type | + | | | + | | Allowed values: **pre**, **post** | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_operation | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_operation | + | | single: OCF_RESKEY_CRM_meta_notify_operation | + | | | + | | Allowed values: **start**, **stop** | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_start_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_resource | + | | single: OCF_RESKEY_CRM_meta_notify_start_resource | + | | | + | | Resources to be started | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_stop_resource | .. 
index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_resource | + | | single: OCF_RESKEY_CRM_meta_notify_stop_resource | + | | | + | | Resources to be stopped | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_active_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_resource | + | | single: OCF_RESKEY_CRM_meta_notify_active_resource | + | | | + | | Resources that are running | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_inactive_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_inactive_resource | + | | single: OCF_RESKEY_CRM_meta_notify_inactive_resource | + | | | + | | Resources that are not running | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_start_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_uname | + | | single: OCF_RESKEY_CRM_meta_notify_start_uname | + | | | + | | Nodes on which resources will be started | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_stop_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_uname | + | | single: OCF_RESKEY_CRM_meta_notify_stop_uname | + | | | + | | Nodes on which resources will be stopped | + +----------------------------------------------+-------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_active_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_uname | + | | single: OCF_RESKEY_CRM_meta_notify_active_uname | + | | | + | | Nodes on which resources are running | + +----------------------------------------------+-------------------------------------------------------------------------------+ + +The variables come in pairs, such as +``OCF_RESKEY_CRM_meta_notify_start_resource`` and +``OCF_RESKEY_CRM_meta_notify_start_uname``, and should be treated as an +array of whitespace-separated elements. + +``OCF_RESKEY_CRM_meta_notify_inactive_resource`` is an exception, as the +matching **uname** variable does not exist since inactive resources +are not running on any node. + +Thus, in order to indicate that **clone:0** will be started on **sles-1**, +**clone:2** will be started on **sles-3**, and **clone:3** will be started +on **sles-2**, the cluster would set: + +.. topic:: Notification variables + + .. code-block:: none + + OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" + OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" + +.. note:: + + Pacemaker will log but otherwise ignore failures of notify actions. 
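+
+For reference, notifications are requested through the ``notify``
+meta-attribute described in the clone options above. The following is a
+minimal, hypothetical sketch (the ids, the ``my-db-agent`` resource agent,
+and its provider are placeholders, not taken from any real configuration);
+an agent configured this way would read the ``OCF_RESKEY_CRM_meta_notify_*``
+variables in its ``notify`` action:
+
+.. topic:: A clone configured to receive notifications
+
+   .. code-block:: xml
+
+      <clone id="db-clone">
+         <meta_attributes id="db-clone-meta_attributes">
+            <!-- call the agent's notify action around any start or stop of an instance -->
+            <nvpair id="db-clone-notify" name="notify" value="true"/>
+         </meta_attributes>
+         <primitive id="db" class="ocf" provider="heartbeat" type="my-db-agent"/>
+      </clone>
+
+As noted in the clone options table, the resource agent must support the
+``notify`` action for this meta-attribute to have any effect.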
+ +Interpretation of Notification Variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Pre-notification (stop):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (stop) / Pre-notification (start):** + +* Active resources + + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +* Inactive resources + + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (start):** + +* Active resources: + + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Inactive resources: + + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +Extra Notifications for Promotable Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: + single: clone; environment variables + single: promotable; environment variables + +.. table:: **Extra environment variables supplied for promotable clones** + :widths: 1 1 + + +------------------------------------------------+---------------------------------------------------------------------------------+ + | Variable | Description | + +================================================+=================================================================================+ + | OCF_RESKEY_CRM_meta_notify_promoted_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_resource | + | | single: OCF_RESKEY_CRM_meta_notify_promoted_resource | + | | | + | | Resources that are running in the promoted role | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_unpromoted_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_resource | + | | single: OCF_RESKEY_CRM_meta_notify_unpromoted_resource | + | | | + | | Resources that are running in the unpromoted role | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_promote_resource | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_resource | + | | single: OCF_RESKEY_CRM_meta_notify_promote_resource | + | | | + | | Resources to be promoted | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_demote_resource | .. 
index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_resource | + | | single: OCF_RESKEY_CRM_meta_notify_demote_resource | + | | | + | | Resources to be demoted | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_promote_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_uname | + | | single: OCF_RESKEY_CRM_meta_notify_promote_uname | + | | | + | | Nodes on which resources will be promoted | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_demote_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_uname | + | | single: OCF_RESKEY_CRM_meta_notify_demote_uname | + | | | + | | Nodes on which resources will be demoted | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_promoted_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_uname | + | | single: OCF_RESKEY_CRM_meta_notify_promoted_uname | + | | | + | | Nodes on which resources are running in the promoted role | + +------------------------------------------------+---------------------------------------------------------------------------------+ + | OCF_RESKEY_CRM_meta_notify_unpromoted_uname | .. index:: | + | | single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_uname | + | | single: OCF_RESKEY_CRM_meta_notify_unpromoted_uname | + | | | + | | Nodes on which resources are running in the unpromoted role | + +------------------------------------------------+---------------------------------------------------------------------------------+ + +Interpretation of Promotable Notification Variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Pre-notification (demote):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Promoted resources: ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` +* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (demote) / Pre-notification (stop):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Promoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + +* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + +**Post-notification (stop) / Pre-notification (start)** + +* Active resources: + + * 
``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +* Promoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + +* Unpromoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +* Inactive resources: + + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (start) / Pre-notification (promote)** + +* Active resources: + + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Promoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + +* Unpromoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Inactive resources: + + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (promote)** + +* Active resources: + + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Promoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` + +* Unpromoted resources: + + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` + +* Inactive resources: + + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were started: 
``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +Monitoring Promotable Clone Resources +_____________________________________ + +The usual monitor actions are insufficient to monitor a promotable clone +resource, because Pacemaker needs to verify not only that the resource is +active, but also that its actual role matches its intended one. + +Define two monitoring actions: the usual one will cover the unpromoted role, +and an additional one with ``role="Promoted"`` will cover the promoted role. + +.. topic:: Monitoring both states of a promotable clone resource + + .. code-block:: xml + + + + + + + + + + + + + +.. important:: + + It is crucial that *every* monitor operation has a different interval! + Pacemaker currently differentiates between operations + only by resource and interval; so if (for example) a promotable clone resource + had the same monitor interval for both roles, Pacemaker would ignore the + role when checking the status -- which would cause unexpected return + codes, and therefore unnecessary complications. + +.. _s-promotion-scores: + +Determining Which Instance is Promoted +______________________________________ + +Pacemaker can choose a promotable clone instance to be promoted in one of two +ways: + +* Promotion scores: These are node attributes set via the ``crm_attribute`` + command using the ``--promotion`` option, which generally would be called by + the resource agent's start action if it supports promotable clones. This tool + automatically detects both the resource and host, and should be used to set a + preference for being promoted. Based on this, ``promoted-max``, and + ``promoted-node-max``, the instance(s) with the highest preference will be + promoted. + +* Constraints: Location constraints can indicate which nodes are most preferred + to be promoted. + +.. topic:: Explicitly preferring node1 to be promoted + + .. code-block:: xml + + + +.. index: + single: bundle + single: resource; bundle + pair: container; Docker + pair: container; podman + pair: container; rkt + +.. _s-resource-bundle: + +Bundles - Containerized Resources +################################# + +Pacemaker supports a special syntax for launching a service inside a +`container `_ +with any infrastructure it requires: the *bundle*. + +Pacemaker bundles support `Docker `_, +`podman `_ *(since 2.0.1)*, and +`rkt `_ container technologies. [#]_ + +.. topic:: A bundle for a containerized web server + + .. code-block:: xml + + + + + + + + + + + + + + +Bundle Prerequisites +____________________ + +Before configuring a bundle in Pacemaker, the user must install the appropriate +container launch technology (Docker, podman, or rkt), and supply a fully +configured container image, on every node allowed to run the bundle. + +Pacemaker will create an implicit resource of type **ocf:heartbeat:docker**, +**ocf:heartbeat:podman**, or **ocf:heartbeat:rkt** to manage a bundle's +container. The user must ensure that the appropriate resource agent is +installed on every node allowed to run the bundle. + +.. index:: + pair: XML element; bundle + +Bundle Properties +_________________ + +.. 
table:: **XML Attributes of a bundle Element** + :widths: 1 4 + + +-------------+------------------------------------------------------------------+ + | Field | Description | + +=============+==================================================================+ + | id | .. index:: | + | | single: bundle; attribute, id | + | | single: attribute; id (bundle) | + | | single: id; bundle attribute | + | | | + | | A unique name for the bundle (required) | + +-------------+------------------------------------------------------------------+ + | description | .. index:: | + | | single: bundle; attribute, description | + | | single: attribute; description (bundle) | + | | single: description; bundle attribute | + | | | + | | An optional description of the group, for the user's own | + | | purposes. | + | | E.g. ``manages the container that runs the service`` | + +-------------+------------------------------------------------------------------+ + + +A bundle must contain exactly one ``docker``, ``podman``, or ``rkt`` element. + +.. index:: + pair: XML element; docker + pair: XML element; podman + pair: XML element; rkt + +Bundle Container Properties +___________________________ + +.. table:: **XML attributes of a docker, podman, or rkt Element** + :class: longtable + :widths: 2 3 4 + + +-------------------+------------------------------------+---------------------------------------------------+ + | Attribute | Default | Description | + +===================+====================================+===================================================+ + | image | | .. index:: | + | | | single: docker; attribute, image | + | | | single: attribute; image (docker) | + | | | single: image; docker attribute | + | | | single: podman; attribute, image | + | | | single: attribute; image (podman) | + | | | single: image; podman attribute | + | | | single: rkt; attribute, image | + | | | single: attribute; image (rkt) | + | | | single: image; rkt attribute | + | | | | + | | | Container image tag (required) | + +-------------------+------------------------------------+---------------------------------------------------+ + | replicas | Value of ``promoted-max`` | .. index:: | + | | if that is positive, else 1 | single: docker; attribute, replicas | + | | | single: attribute; replicas (docker) | + | | | single: replicas; docker attribute | + | | | single: podman; attribute, replicas | + | | | single: attribute; replicas (podman) | + | | | single: replicas; podman attribute | + | | | single: rkt; attribute, replicas | + | | | single: attribute; replicas (rkt) | + | | | single: replicas; rkt attribute | + | | | | + | | | A positive integer specifying the number of | + | | | container instances to launch | + +-------------------+------------------------------------+---------------------------------------------------+ + | replicas-per-host | 1 | .. 
index:: | + | | | single: docker; attribute, replicas-per-host | + | | | single: attribute; replicas-per-host (docker) | + | | | single: replicas-per-host; docker attribute | + | | | single: podman; attribute, replicas-per-host | + | | | single: attribute; replicas-per-host (podman) | + | | | single: replicas-per-host; podman attribute | + | | | single: rkt; attribute, replicas-per-host | + | | | single: attribute; replicas-per-host (rkt) | + | | | single: replicas-per-host; rkt attribute | + | | | | + | | | A positive integer specifying the number of | + | | | container instances allowed to run on a | + | | | single node | + +-------------------+------------------------------------+---------------------------------------------------+ + | promoted-max | 0 | .. index:: | + | | | single: docker; attribute, promoted-max | + | | | single: attribute; promoted-max (docker) | + | | | single: promoted-max; docker attribute | + | | | single: podman; attribute, promoted-max | + | | | single: attribute; promoted-max (podman) | + | | | single: promoted-max; podman attribute | + | | | single: rkt; attribute, promoted-max | + | | | single: attribute; promoted-max (rkt) | + | | | single: promoted-max; rkt attribute | + | | | | + | | | A non-negative integer that, if positive, | + | | | indicates that the containerized service | + | | | should be treated as a promotable service, | + | | | with this many replicas allowed to run the | + | | | service in the promoted role | + +-------------------+------------------------------------+---------------------------------------------------+ + | network | | .. index:: | + | | | single: docker; attribute, network | + | | | single: attribute; network (docker) | + | | | single: network; docker attribute | + | | | single: podman; attribute, network | + | | | single: attribute; network (podman) | + | | | single: network; podman attribute | + | | | single: rkt; attribute, network | + | | | single: attribute; network (rkt) | + | | | single: network; rkt attribute | + | | | | + | | | If specified, this will be passed to the | + | | | ``docker run``, ``podman run``, or | + | | | ``rkt run`` command as the network setting | + | | | for the container. | + +-------------------+------------------------------------+---------------------------------------------------+ + | run-command | ``/usr/sbin/pacemaker-remoted`` if | .. index:: | + | | bundle contains a **primitive**, | single: docker; attribute, run-command | + | | otherwise none | single: attribute; run-command (docker) | + | | | single: run-command; docker attribute | + | | | single: podman; attribute, run-command | + | | | single: attribute; run-command (podman) | + | | | single: run-command; podman attribute | + | | | single: rkt; attribute, run-command | + | | | single: attribute; run-command (rkt) | + | | | single: run-command; rkt attribute | + | | | | + | | | This command will be run inside the container | + | | | when launching it ("PID 1"). If the bundle | + | | | contains a **primitive**, this command *must* | + | | | start ``pacemaker-remoted`` (but could, for | + | | | example, be a script that does other stuff, too). | + +-------------------+------------------------------------+---------------------------------------------------+ + | options | | .. 
index:: | + | | | single: docker; attribute, options | + | | | single: attribute; options (docker) | + | | | single: options; docker attribute | + | | | single: podman; attribute, options | + | | | single: attribute; options (podman) | + | | | single: options; podman attribute | + | | | single: rkt; attribute, options | + | | | single: attribute; options (rkt) | + | | | single: options; rkt attribute | + | | | | + | | | Extra command-line options to pass to the | + | | | ``docker run``, ``podman run``, or ``rkt run`` | + | | | command | + +-------------------+------------------------------------+---------------------------------------------------+ + +.. note:: + + Considerations when using cluster configurations or container images from + Pacemaker 1.1: + + * If the container image has a pre-2.0.0 version of Pacemaker, set ``run-command`` + to ``/usr/sbin/pacemaker_remoted`` (note the underbar instead of dash). + + * ``masters`` is accepted as an alias for ``promoted-max``, but is deprecated since + 2.0.0, and support for it will be removed in a future version. + +Bundle Network Properties +_________________________ + +A bundle may optionally contain one ```` element. + +.. index:: + pair: XML element; network + single: bundle; network + +.. table:: **XML attributes of a network Element** + :widths: 2 1 5 + + +----------------+---------+------------------------------------------------------------+ + | Attribute | Default | Description | + +================+=========+============================================================+ + | add-host | TRUE | .. index:: | + | | | single: network; attribute, add-host | + | | | single: attribute; add-host (network) | + | | | single: add-host; network attribute | + | | | | + | | | If TRUE, and ``ip-range-start`` is used, Pacemaker will | + | | | automatically ensure that ``/etc/hosts`` inside the | + | | | containers has entries for each | + | | | :ref:`replica name ` | + | | | and its assigned IP. | + +----------------+---------+------------------------------------------------------------+ + | ip-range-start | | .. index:: | + | | | single: network; attribute, ip-range-start | + | | | single: attribute; ip-range-start (network) | + | | | single: ip-range-start; network attribute | + | | | | + | | | If specified, Pacemaker will create an implicit | + | | | ``ocf:heartbeat:IPaddr2`` resource for each container | + | | | instance, starting with this IP address, using up to | + | | | ``replicas`` sequential addresses. These addresses can be | + | | | used from the host's network to reach the service inside | + | | | the container, though it is not visible within the | + | | | container itself. Only IPv4 addresses are currently | + | | | supported. | + +----------------+---------+------------------------------------------------------------+ + | host-netmask | 32 | .. index:: | + | | | single: network; attribute; host-netmask | + | | | single: attribute; host-netmask (network) | + | | | single: host-netmask; network attribute | + | | | | + | | | If ``ip-range-start`` is specified, the IP addresses | + | | | are created with this CIDR netmask (as a number of bits). | + +----------------+---------+------------------------------------------------------------+ + | host-interface | | .. 
index:: | + | | | single: network; attribute; host-interface | + | | | single: attribute; host-interface (network) | + | | | single: host-interface; network attribute | + | | | | + | | | If ``ip-range-start`` is specified, the IP addresses are | + | | | created on this host interface (by default, it will be | + | | | determined from the IP address). | + +----------------+---------+------------------------------------------------------------+ + | control-port | 3121 | .. index:: | + | | | single: network; attribute; control-port | + | | | single: attribute; control-port (network) | + | | | single: control-port; network attribute | + | | | | + | | | If the bundle contains a ``primitive``, the cluster will | + | | | use this integer TCP port for communication with | + | | | Pacemaker Remote inside the container. Changing this is | + | | | useful when the container is unable to listen on the | + | | | default port, for example, when the container uses the | + | | | host's network rather than ``ip-range-start`` (in which | + | | | case ``replicas-per-host`` must be 1), or when the bundle | + | | | may run on a Pacemaker Remote node that is already | + | | | listening on the default port. Any ``PCMK_remote_port`` | + | | | environment variable set on the host or in the container | + | | | is ignored for bundle connections. | + +----------------+---------+------------------------------------------------------------+ + +.. _s-resource-bundle-note-replica-names: + +.. note:: + + Replicas are named by the bundle id plus a dash and an integer counter starting + with zero. For example, if a bundle named **httpd-bundle** has **replicas=2**, its + containers will be named **httpd-bundle-0** and **httpd-bundle-1**. + +.. index:: + pair: XML element; port-mapping + +Additionally, a ``network`` element may optionally contain one or more +``port-mapping`` elements. + +.. table:: **Attributes of a port-mapping Element** + :widths: 2 1 5 + + +---------------+-------------------+------------------------------------------------------+ + | Attribute | Default | Description | + +===============+===================+======================================================+ + | id | | .. index:: | + | | | single: port-mapping; attribute, id | + | | | single: attribute; id (port-mapping) | + | | | single: id; port-mapping attribute | + | | | | + | | | A unique name for the port mapping (required) | + +---------------+-------------------+------------------------------------------------------+ + | port | | .. index:: | + | | | single: port-mapping; attribute, port | + | | | single: attribute; port (port-mapping) | + | | | single: port; port-mapping attribute | + | | | | + | | | If this is specified, connections to this TCP port | + | | | number on the host network (on the container's | + | | | assigned IP address, if ``ip-range-start`` is | + | | | specified) will be forwarded to the container | + | | | network. Exactly one of ``port`` or ``range`` | + | | | must be specified in a ``port-mapping``. | + +---------------+-------------------+------------------------------------------------------+ + | internal-port | value of ``port`` | .. index:: | + | | | single: port-mapping; attribute, internal-port | + | | | single: attribute; internal-port (port-mapping) | + | | | single: internal-port; port-mapping attribute | + | | | | + | | | If ``port`` and this are specified, connections | + | | | to ``port`` on the host's network will be | + | | | forwarded to this port on the container network. 
| + +---------------+-------------------+------------------------------------------------------+ + | range | | .. index:: | + | | | single: port-mapping; attribute, range | + | | | single: attribute; range (port-mapping) | + | | | single: range; port-mapping attribute | + | | | | + | | | If this is specified, connections to these TCP | + | | | port numbers (expressed as *first_port*-*last_port*) | + | | | on the host network (on the container's assigned IP | + | | | address, if ``ip-range-start`` is specified) will | + | | | be forwarded to the same ports in the container | + | | | network. Exactly one of ``port`` or ``range`` | + | | | must be specified in a ``port-mapping``. | + +---------------+-------------------+------------------------------------------------------+ + +.. note:: + + If the bundle contains a ``primitive``, Pacemaker will automatically map the + ``control-port``, so it is not necessary to specify that port in a + ``port-mapping``. + +.. index: + pair: XML element; storage + pair: XML element; storage-mapping + single: bundle; storage + +.. _s-bundle-storage: + +Bundle Storage Properties +_________________________ + +A bundle may optionally contain one ``storage`` element. A ``storage`` element +has no properties of its own, but may contain one or more ``storage-mapping`` +elements. + +.. table:: **Attributes of a storage-mapping Element** + :widths: 2 1 5 + + +-----------------+---------+-------------------------------------------------------------+ + | Attribute | Default | Description | + +=================+=========+=============================================================+ + | id | | .. index:: | + | | | single: storage-mapping; attribute, id | + | | | single: attribute; id (storage-mapping) | + | | | single: id; storage-mapping attribute | + | | | | + | | | A unique name for the storage mapping (required) | + +-----------------+---------+-------------------------------------------------------------+ + | source-dir | | .. index:: | + | | | single: storage-mapping; attribute, source-dir | + | | | single: attribute; source-dir (storage-mapping) | + | | | single: source-dir; storage-mapping attribute | + | | | | + | | | The absolute path on the host's filesystem that will be | + | | | mapped into the container. Exactly one of ``source-dir`` | + | | | and ``source-dir-root`` must be specified in a | + | | | ``storage-mapping``. | + +-----------------+---------+-------------------------------------------------------------+ + | source-dir-root | | .. index:: | + | | | single: storage-mapping; attribute, source-dir-root | + | | | single: attribute; source-dir-root (storage-mapping) | + | | | single: source-dir-root; storage-mapping attribute | + | | | | + | | | The start of a path on the host's filesystem that will | + | | | be mapped into the container, using a different | + | | | subdirectory on the host for each container instance. | + | | | The subdirectory will be named the same as the | + | | | :ref:`replica name `. | + | | | Exactly one of ``source-dir`` and ``source-dir-root`` | + | | | must be specified in a ``storage-mapping``. | + +-----------------+---------+-------------------------------------------------------------+ + | target-dir | | .. 
index:: | + | | | single: storage-mapping; attribute, target-dir | + | | | single: attribute; target-dir (storage-mapping) | + | | | single: target-dir; storage-mapping attribute | + | | | | + | | | The path name within the container where the host | + | | | storage will be mapped (required) | + +-----------------+---------+-------------------------------------------------------------+ + | options | | .. index:: | + | | | single: storage-mapping; attribute, options | + | | | single: attribute; options (storage-mapping) | + | | | single: options; storage-mapping attribute | + | | | | + | | | A comma-separated list of file system mount | + | | | options to use when mapping the storage | + +-----------------+---------+-------------------------------------------------------------+ + +.. note:: + + Pacemaker does not define the behavior if the source directory does not already + exist on the host. However, it is expected that the container technology and/or + its resource agent will create the source directory in that case. + +.. note:: + + If the bundle contains a ``primitive``, + Pacemaker will automatically map the equivalent of + ``source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey`` + and ``source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log`` into the + container, so it is not necessary to specify those paths in a + ``storage-mapping``. + +.. important:: + + The ``PCMK_authkey_location`` environment variable must not be set to anything + other than the default of ``/etc/pacemaker/authkey`` on any node in the cluster. + +.. important:: + + If SELinux is used in enforcing mode on the host, you must ensure the container + is allowed to use any storage you mount into it. For Docker and podman bundles, + adding "Z" to the mount options will create a container-specific label for the + mount that allows the container access. + +.. index:: + single: bundle; primitive + +Bundle Primitive +________________ + +A bundle may optionally contain one :ref:`primitive ` +resource. The primitive may have operations, instance attributes, and +meta-attributes defined, as usual. + +If a bundle contains a primitive resource, the container image must include +the Pacemaker Remote daemon, and at least one of ``ip-range-start`` or +``control-port`` must be configured in the bundle. Pacemaker will create an +implicit **ocf:pacemaker:remote** resource for the connection, launch +Pacemaker Remote within the container, and monitor and manage the primitive +resource via Pacemaker Remote. + +If the bundle has more than one container instance (replica), the primitive +resource will function as an implicit :ref:`clone ` -- a +:ref:`promotable clone ` if the bundle has ``promoted-max`` +greater than zero. + +.. note:: + + If you want to pass environment variables to a bundle's Pacemaker Remote + connection or primitive, you have two options: + + * Environment variables whose value is the same regardless of the underlying host + may be set using the container element's ``options`` attribute. + * If you want variables to have host-specific values, you can use the + :ref:`storage-mapping ` element to map a file on the host as + ``/etc/pacemaker/pcmk-init.env`` in the container *(since 2.0.3)*. + Pacemaker Remote will parse this file as a shell-like format, with + variables set as NAME=VALUE, ignoring blank lines and comments starting + with "#". + +.. 
important:: + + When a bundle has a ``primitive``, Pacemaker on all cluster nodes must be able to + contact Pacemaker Remote inside the bundle's containers. + + * The containers must have an accessible network (for example, ``network`` should + not be set to "none" with a ``primitive``). + * The default, using a distinct network space inside the container, works in + combination with ``ip-range-start``. Any firewall must allow access from all + cluster nodes to the ``control-port`` on the container IPs. + * If the container shares the host's network space (for example, by setting + ``network`` to "host"), a unique ``control-port`` should be specified for each + bundle. Any firewall must allow access from all cluster nodes to the + ``control-port`` on all cluster and remote node IPs. + +.. index:: + single: bundle; node attributes + +.. _s-bundle-attributes: + +Bundle Node Attributes +______________________ + +If the bundle has a ``primitive``, the primitive's resource agent may want to set +node attributes such as :ref:`promotion scores `. However, with +containers, it is not apparent which node should get the attribute. + +If the container uses shared storage that is the same no matter which node the +container is hosted on, then it is appropriate to use the promotion score on the +bundle node itself. + +On the other hand, if the container uses storage exported from the underlying host, +then it may be more appropriate to use the promotion score on the underlying host. + +Since this depends on the particular situation, the +``container-attribute-target`` resource meta-attribute allows the user to specify +which approach to use. If it is set to ``host``, then user-defined node attributes +will be checked on the underlying host. If it is anything else, the local node +(in this case the bundle node) is used as usual. + +This only applies to user-defined attributes; the cluster will always check the +local node for cluster-defined attributes such as ``#uname``. + +If ``container-attribute-target`` is ``host``, the cluster will pass additional +environment variables to the primitive's resource agent that allow it to set +node attributes appropriately: ``CRM_meta_container_attribute_target`` (identical +to the meta-attribute value) and ``CRM_meta_physical_host`` (the name of the +underlying host). + +.. note:: + + When called by a resource agent, the ``attrd_updater`` and ``crm_attribute`` + commands will automatically check those environment variables and set + attributes appropriately. + +.. index:: + single: bundle; meta-attributes + +Bundle Meta-Attributes +______________________ + +Any meta-attribute set on a bundle will be inherited by the bundle's +primitive and any resources implicitly created by Pacemaker for the bundle. + +This includes options such as ``priority``, ``target-role``, and ``is-managed``. See +:ref:`resource_options` for more information. + +Bundles support clone meta-attributes including ``notify``, ``ordered``, and +``interleave``. + +Limitations of Bundles +______________________ + +Restarting pacemaker while a bundle is unmanaged or the cluster is in +maintenance mode may cause the bundle to fail. + +Bundles may not be explicitly cloned or included in groups. This includes the +bundle's primitive and any resources implicitly created by Pacemaker for the +bundle. (If ``replicas`` is greater than 1, the bundle will behave like a clone +implicitly.) 
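+For example, a minimal sketch (the bundle ID and image name are illustrative,
+and a real bundle containing a ``primitive`` would also need ``control-port``
+or ``ip-range-start`` as described above) of relying on ``replicas`` for
+clone-like behavior instead of an explicit clone:
+
+.. topic:: A bundle whose container runs three replicas
+
+   .. code-block:: xml
+
+      <bundle id="httpd-bundle">
+        <podman image="pcmk:httpd" replicas="3"/>
+      </bundle>
+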
+ +Bundles do not have instance attributes, utilization attributes, or operations, +though a bundle's primitive may have them. + +A bundle with a primitive can run on a Pacemaker Remote node only if the bundle +uses a distinct ``control-port``. + +.. [#] Of course, the service must support running multiple instances. + +.. [#] Docker is a trademark of Docker, Inc. No endorsement by or association with + Docker, Inc. is implied. diff --git a/doc/sphinx/Pacemaker_Explained/constraints.rst b/doc/sphinx/Pacemaker_Explained/constraints.rst index ab34c9f..a78d6c2 100644 --- a/doc/sphinx/Pacemaker_Explained/constraints.rst +++ b/doc/sphinx/Pacemaker_Explained/constraints.rst @@ -7,49 +7,6 @@ Resource Constraints -------------------- -.. index:: - single: resource; score - single: node; score - -Scores -###### - -Scores of all kinds are integral to how the cluster works. -Practically everything from moving a resource to deciding which -resource to stop in a degraded cluster is achieved by manipulating -scores in some way. - -Scores are calculated per resource and node. Any node with a -negative score for a resource can't run that resource. The cluster -places a resource on the node with the highest score for it. - -Infinity Math -_____________ - -Pacemaker implements **INFINITY** (or equivalently, **+INFINITY**) internally as a -score of 1,000,000. Addition and subtraction with it follow these three basic -rules: - -* Any value + **INFINITY** = **INFINITY** - -* Any value - **INFINITY** = -**INFINITY** - -* **INFINITY** - **INFINITY** = **-INFINITY** - -.. note:: - - What if you want to use a score higher than 1,000,000? Typically this possibility - arises when someone wants to base the score on some external metric that might - go above 1,000,000. - - The short answer is you can't. - - The long answer is it is sometimes possible work around this limitation - creatively. You may be able to set the score to some computed value based on - the external metric rather than use the metric directly. For nodes, you can - store the metric as a node attribute, and query the attribute when computing - the score (possibly as part of a custom resource agent). - .. _location-constraint: .. index:: @@ -434,6 +391,20 @@ Because the above example lets ``symmetrical`` default to TRUE, **Webserver** must be stopped before **Database** can be stopped, and **Webserver** should be stopped before **IP** if they both need to be stopped. +Symmetric and asymmetric ordering +_________________________________ + +A mandatory symmetric ordering of "start A then start B" implies not only that +the start actions must be ordered, but that B is not allowed to be active +unless A is active. For example, if the ordering is added to the configuration +when A is stopped (due to target-role, failure, etc.) and B is already active, +then B will be stopped. + +By contrast, asymmetric ordering of "start A then start B" means the stops can +occur in either order, which implies that B *can* remain active in the same +situation. + + .. index:: single: colocation single: constraint; colocation @@ -535,8 +506,8 @@ _____________________ | | | If ``rsc`` and ``with-rsc`` are specified, and ``rsc`` | | | | is a :ref:`promotable clone `, | | | | the constraint applies only to ``rsc`` instances in | - | | | this role. Allowed values: ``Started``, ``Promoted``, | - | | | ``Unpromoted``. For details, see | + | | | this role. Allowed values: ``Started``, ``Stopped``, | + | | | ``Promoted``, ``Unpromoted``. 
For details, see | | | | :ref:`promotable-clone-constraints`. | +----------------+----------------+--------------------------------------------------------+ | with-rsc-role | Started | .. index:: | @@ -548,8 +519,8 @@ _____________________ | | | ``with-rsc`` is a | | | | :ref:`promotable clone `, the | | | | constraint applies only to ``with-rsc`` instances in | - | | | this role. Allowed values: ``Started``, ``Promoted``, | - | | | ``Unpromoted``. For details, see | + | | | this role. Allowed values: ``Started``, ``Stopped``, | + | | | ``Promoted``, ``Unpromoted``. For details, see | | | | :ref:`promotable-clone-constraints`. | +----------------+----------------+--------------------------------------------------------+ | influence | value of | .. index:: | diff --git a/doc/sphinx/Pacemaker_Explained/index.rst b/doc/sphinx/Pacemaker_Explained/index.rst index de2ddd9..63387f3 100644 --- a/doc/sphinx/Pacemaker_Explained/index.rst +++ b/doc/sphinx/Pacemaker_Explained/index.rst @@ -18,15 +18,16 @@ Table of Contents :numbered: intro - options + local-options + cluster-options nodes resources + operations constraints fencing alerts rules - advanced-options - advanced-resources + collective reusing-configuration utilization acls diff --git a/doc/sphinx/Pacemaker_Explained/local-options.rst b/doc/sphinx/Pacemaker_Explained/local-options.rst new file mode 100644 index 0000000..91eda66 --- /dev/null +++ b/doc/sphinx/Pacemaker_Explained/local-options.rst @@ -0,0 +1,515 @@ +Host-Local Configuration +------------------------ + +.. index:: + pair: XML element; configuration + +.. note:: Directory and file paths below may differ on your system depending on + your Pacemaker build settings. Check your Pacemaker configuration + file to find the correct paths. + +Pacemaker supports several host-local configuration options. These options can +be configured on each node in the main Pacemaker configuration file +(|PCMK_CONFIG_FILE|) in the format ``=""``. They work by setting +environment variables when Pacemaker daemons start up. + +.. list-table:: **Local Options** + :class: longtable + :widths: 2 2 2 5 + :header-rows: 1 + + * - Name + - Type + - Default + - Description + * - .. _pcmk_logfacility: + + .. index:: + pair: node option; PCMK_logfacility + + PCMK_logfacility + - :ref:`enumeration ` + - daemon + - Enable logging via the system log or journal, using the specified log + facility. Messages sent here are of value to all Pacemaker + administrators. This can be disabled using ``none``, but that is not + recommended. Allowed values: + + * ``none`` + * ``daemon`` + * ``user`` + * ``local0`` + * ``local1`` + * ``local2`` + * ``local3`` + * ``local4`` + * ``local5`` + * ``local6`` + * ``local7`` + + * - .. _pcmk_logpriority: + + .. index:: + pair:: node option; PCMK_logpriority + + PCMK_logpriority + - :ref:`enumeration ` + - notice + - Unless system logging is disabled using ``PCMK_logfacility=none``, + messages of the specified log severity and higher will be sent to the + system log. The default is appropriate for most installations. Allowed + values: + + * ``emerg`` + * ``alert`` + * ``crit`` + * ``error`` + * ``warning`` + * ``notice`` + * ``info`` + * ``debug`` + + * - .. _pcmk_logfile: + + .. index:: + pair:: node option; PCMK_logfile + + PCMK_logfile + - :ref:`text ` + - |PCMK_LOG_FILE| + - Unless set to ``none``, more detailed log messages will be sent to the + specified file (in addition to the system log, if enabled). 
These + messages may have extended information, and will include messages of info + severity. This log is of more use to developers and advanced system + administrators, and when reporting problems. + + * - .. _pcmk_logfile_mode: + + .. index:: + pair:: node option; PCMK_logfile_mode + + PCMK_logfile_mode + - :ref:`text ` + - 0660 + - Pacemaker will set the permissions on the detail log to this value (see + ``chmod(1)``). + + * - .. _pcmk_debug: + + .. index:: + pair:: node option; PCMK_debug + + PCMK_debug + - :ref:`enumeration ` + - no + - Whether to send debug severity messages to the detail log. This may be + set for all subsystems (``yes`` or ``no``) or for specific (comma- + separated) subsystems. Allowed subsystems are: + + * ``pacemakerd`` + * ``pacemaker-attrd`` + * ``pacemaker-based`` + * ``pacemaker-controld`` + * ``pacemaker-execd`` + * ``pacemaker-fenced`` + * ``pacemaker-schedulerd`` + + Example: ``PCMK_debug="pacemakerd,pacemaker-execd"`` + + * - .. _pcmk_stderr: + + .. index:: + pair:: node option; PCMK_stderr + + PCMK_stderr + - :ref:`boolean ` + - no + - *Advanced Use Only:* Whether to send daemon log messages to stderr. This + would be useful only during troubleshooting, when starting Pacemaker + manually on the command line. + + Setting this option in the configuration file is pointless, since the + file is not read when starting Pacemaker manually. However, it can be set + directly as an environment variable on the command line. + + * - .. _pcmk_trace_functions: + + .. index:: + pair:: node option; PCMK_trace_functions + + PCMK_trace_functions + - :ref:`text ` + - + - *Advanced Use Only:* Send debug and trace severity messages from these + (comma-separated) source code functions to the detail log. + + Example: + ``PCMK_trace_functions="func1,func2"`` + + * - .. _pcmk_trace_files: + + .. index:: + pair:: node option; PCMK_trace_files + + PCMK_trace_files + - :ref:`text ` + - + - *Advanced Use Only:* Send debug and trace severity messages from all + functions in these (comma-separated) source file names to the detail log. + + Example: ``PCMK_trace_files="file1.c,file2.c"`` + + * - .. _pcmk_trace_formats: + + .. index:: + pair:: node option; PCMK_trace_formats + + PCMK_trace_formats + - :ref:`text ` + - + - *Advanced Use Only:* Send trace severity messages that are generated by + these (comma-separated) format strings in the source code to the detail + log. + + Example: ``PCMK_trace_formats="Error: %s (%d)"`` + + * - .. _pcmk_trace_tags: + + .. index:: + pair:: node option; PCMK_trace_tags + + PCMK_trace_tags + - :ref:`text ` + - + - *Advanced Use Only:* Send debug and trace severity messages related to + these (comma-separated) resource IDs to the detail log. + + Example: ``PCMK_trace_tags="client-ip,dbfs"`` + + * - .. _pcmk_blackbox: + + .. index:: + pair:: node option; PCMK_blackbox + + PCMK_blackbox + - :ref:`enumeration ` + - no + - *Advanced Use Only:* Enable blackbox logging globally (``yes`` or ``no``) + or by subsystem. A blackbox contains a rolling buffer of all logs (of all + severities). Blackboxes are stored under |CRM_BLACKBOX_DIR| by default, + by default, and their contents can be viewed using the ``qb-blackbox(8)`` + command. + + The blackbox recorder can be enabled at start using this variable, or at + runtime by sending a Pacemaker subsystem daemon process a ``SIGUSR1`` or + ``SIGTRAP`` signal, and disabled by sending ``SIGUSR2`` (see + ``kill(1)``). The blackbox will be written after a crash, assertion + failure, or ``SIGTRAP`` signal. 
+ + See :ref:`PCMK_debug ` for allowed subsystems. + + Example: + ``PCMK_blackbox="pacemakerd,pacemaker-execd"`` + + * - .. _pcmk_trace_blackbox: + + .. index:: + pair:: node option; PCMK_trace_blackbox + + PCMK_trace_blackbox + - :ref:`enumeration ` + - + - *Advanced Use Only:* Write a blackbox whenever the message at the + specified function and line is logged. Multiple entries may be comma- + separated. + + Example: ``PCMK_trace_blackbox="remote.c:144,remote.c:149"`` + + * - .. _pcmk_node_start_state: + + .. index:: + pair:: node option; PCMK_node_start_state + + PCMK_node_start_state + - :ref:`enumeration ` + - default + - By default, the local host will join the cluster in an online or standby + state when Pacemaker first starts depending on whether it was previously + put into standby mode. If this variable is set to ``standby`` or + ``online``, it will force the local host to join in the specified state. + + * - .. _pcmk_node_action_limit: + + .. index:: + pair:: node option; PCMK_node_action_limit + + PCMK_node_action_limit + - :ref:`nonnegative integer ` + - + - Specify the maximum number of jobs that can be scheduled on this node. If + set, this overrides the ``node-action-limit`` cluster property for this + node. + + * - .. _pcmk_shutdown_delay: + + .. index:: + pair:: node option; PCMK_shutdown_delay + + PCMK_shutdown_delay + - :ref:`timeout ` + - + - Specify a delay before shutting down ``pacemakerd`` after shutting down + all other Pacemaker daemons. + + * - .. _pcmk_fail_fast: + + .. index:: + pair:: node option; PCMK_fail_fast + + PCMK_fail_fast + - :ref:`boolean ` + - no + - By default, if a Pacemaker subsystem crashes, the main ``pacemakerd`` + process will attempt to restart it. If this variable is set to ``yes``, + ``pacemakerd`` will panic the local host instead. + + * - .. _pcmk_panic_action: + + .. index:: + pair:: node option; PCMK_panic_action + + PCMK_panic_action + - :ref:`enumeration ` + - reboot + - Pacemaker will panic the local host under certain conditions. By default, + this means rebooting the host. This variable can change that behavior: if + ``crash``, trigger a kernel crash (useful if you want a kernel dump to + investigate); if ``sync-reboot`` or ``sync-crash``, synchronize + filesystems before rebooting the host or triggering a kernel crash. The + sync values are more likely to preserve log messages, but with the risk + that the host may be left active if the synchronization hangs. + + * - .. _pcmk_authkey_location: + + .. index:: + pair:: node option; PCMK_authkey_location + + PCMK_authkey_location + - :ref:`text ` + - |PCMK_AUTHKEY_FILE| + - Use the contents of this file as the authorization key to use with + Pacemaker Remote connections. This file must be readable by Pacemaker + daemons (that is, it must allow read permissions to either the + |CRM_DAEMON_USER| user or the |CRM_DAEMON_GROUP| group), and its contents + must be identical on all nodes. + + * - .. _pcmk_remote_address: + + .. index:: + pair:: node option; PCMK_remote_address + + PCMK_remote_address + - :ref:`text ` + - + - By default, if the Pacemaker Remote service is run on the local node, it + will listen for connections on all IP addresses. This may be set to one + address to listen on instead, as a resolvable hostname or as a numeric + IPv4 or IPv6 address. When resolving names or listening on all addresses, + IPv6 will be preferred if available. When listening on an IPv6 address, + IPv4 clients will be supported via IPv4-mapped IPv6 addresses. 
+ + Example: ``PCMK_remote_address="192.0.2.1"`` + + * - .. _pcmk_remote_port: + + .. index:: + pair:: node option; PCMK_remote_port + + PCMK_remote_port + - :ref:`port ` + - 3121 + - Use this TCP port number for Pacemaker Remote node connections. This + value must be the same on all nodes. + + * - .. _pcmk_remote_pid1: + + .. index:: + pair:: node option; PCMK_remote_pid1 + + PCMK_remote_pid1 + - :ref:`enumeration ` + - default + - *Advanced Use Only:* When a bundle resource's ``run-command`` option is + left to default, Pacemaker Remote runs as PID 1 in the bundle's + containers. When it does so, it loads environment variables from the + container's |PCMK_INIT_ENV_FILE| and performs the PID 1 responsibility of + reaping dead subprocesses. + + This option controls whether those actions are performed when Pacemaker + Remote is not running as PID 1. It is intended primarily for developer + testing but can be useful when ``run-command`` is set to a separate, + custom PID 1 process that launches Pacemaker Remote. + + * ``full``: Pacemaker Remote loads environment variables from + |PCMK_INIT_ENV_FILE| and reaps dead subprocesses. + * ``vars``: Pacemaker Remote loads environment variables from + |PCMK_INIT_ENV_FILE| but does not reap dead subprocesses. + * ``default``: Pacemaker Remote performs neither action. + + If Pacemaker Remote is running as PID 1, this option is ignored, and the + behavior is the same as for ``full``. + + * - .. _pcmk_tls_priorities: + + .. index:: + pair:: node option; PCMK_tls_priorities + + PCMK_tls_priorities + - :ref:`text ` + - |PCMK_GNUTLS_PRIORITIES| + - *Advanced Use Only:* These GnuTLS cipher priorities will be used for TLS + connections (whether for Pacemaker Remote connections or remote CIB + access, when enabled). See: + + https://gnutls.org/manual/html_node/Priority-Strings.html + + Pacemaker will append ``":+ANON-DH"`` for remote CIB access and + ``":+DHE-PSK:+PSK"`` for Pacemaker Remote connections, as they are + required for the respective functionality. + + Example: + ``PCMK_tls_priorities="SECURE128:+SECURE192"`` + + * - .. _pcmk_dh_min_bits: + + .. index:: + pair:: node option; PCMK_dh_min_bits + + PCMK_dh_min_bits + - :ref:`nonnegative integer ` + - 0 (no minimum) + - *Advanced Use Only:* Set a lower bound on the bit length of the prime + number generated for Diffie-Hellman parameters needed by TLS connections. + The default is no minimum. + + The server (Pacemaker Remote daemon, or CIB manager configured to accept + remote clients) will use this value to provide a floor for the value + recommended by the GnuTLS library. The library will only accept a limited + number of specific values, which vary by library version, so setting + these is recommended only when required for compatibility with specific + client versions. + + Clients (connecting cluster nodes or remote CIB commands) will require + that the server use a prime of at least this size. This is recommended + only when the value must be lowered in order for the client's GnuTLS + library to accept a connection to an older server. + + * - .. _pcmk_dh_max_bits: + + .. index:: + pair:: node option; PCMK_dh_max_bits + + PCMK_dh_max_bits + - :ref:`nonnegative integer ` + - 0 (no maximum) + - *Advanced Use Only:* Set an upper bound on the bit length of the prime + number generated for Diffie-Hellman parameters needed by TLS connections. + The default is no maximum. 
+ + The server (Pacemaker Remote daemon, or CIB manager configured to accept + remote clients) will use this value to provide a ceiling for the value + recommended by the GnuTLS library. The library will only accept a limited + number of specific values, which vary by library version, so setting + these is recommended only when required for compatibility with specific + client versions. + + Clients do not use ``PCMK_dh_max_bits``. + + * - .. _pcmk_ipc_type: + + .. index:: + pair:: node option; PCMK_ipc_type + + PCMK_ipc_type + - :ref:`enumeration ` + - shared-mem + - *Advanced Use Only:* Force use of a particular IPC method. Allowed values: + + * ``shared-mem`` + * ``socket`` + * ``posix`` + * ``sysv`` + + * - .. _pcmk_ipc_buffer: + + .. index:: + pair:: node option; PCMK_ipc_buffer + + PCMK_ipc_buffer + - :ref:`nonnegative integer ` + - 131072 + - *Advanced Use Only:* Specify an IPC buffer size in bytes. This can be + useful when connecting to large clusters that result in messages + exceeding the default size (which will also result in log messages + referencing this variable). + + * - .. _pcmk_cluster_type: + + .. index:: + pair:: node option; PCMK_cluster_type + + PCMK_cluster_type + - :ref:`enumeration ` + - corosync + - *Advanced Use Only:* Specify the cluster layer to be used. If unset, + Pacemaker will detect and use a supported cluster layer, if available. + Currently, ``"corosync"`` is the only supported cluster layer. If + multiple layers are supported in the future, this will allow overriding + Pacemaker's automatic detection to select a specific one. + + * - .. _pcmk_schema_directory: + + .. index:: + pair:: node option; PCMK_schema_directory + + PCMK_schema_directory + - :ref:`text ` + - |CRM_SCHEMA_DIRECTORY| + - *Advanced Use Only:* Specify an alternate location for RNG schemas and + XSL transforms. + + * - .. _pcmk_valgrind_enabled: + + .. index:: + pair:: node option; PCMK_valgrind_enabled + + PCMK_valgrind_enabled + - :ref:`enumeration ` + - no + - *Advanced Use Only:* Whether subsystem daemons should be run under + ``valgrind``. Allowed values are the same as for ``PCMK_debug``. + + * - .. _pcmk_callgrind_enabled: + + .. index:: + pair:: node option; PCMK_callgrind_enabled + + PCMK_callgrind_enabled + - :ref:`enumeration ` + - no + - *Advanced Use Only:* Whether subsystem daemons should be run under + ``valgrind`` with the ``callgrind`` tool enabled. Allowed values are the + same as for ``PCMK_debug``. + + * - .. _valgrind_opts: + + .. index:: + pair:: node option; VALGRIND_OPTS + + VALGRIND_OPTS + - :ref:`text ` + - + - *Advanced Use Only:* Pass these options to valgrind, when enabled (see + ``valgrind(1)``). ``"--vgdb=no"`` should usually be specified because + ``pacemaker-execd`` can lower privileges when executing commands, which + would otherwise leave a bunch of unremovable files in ``/tmp``. diff --git a/doc/sphinx/Pacemaker_Explained/nodes.rst b/doc/sphinx/Pacemaker_Explained/nodes.rst index 6fcadb3..378b067 100644 --- a/doc/sphinx/Pacemaker_Explained/nodes.rst +++ b/doc/sphinx/Pacemaker_Explained/nodes.rst @@ -105,6 +105,9 @@ To read back the value that was just set: The ``--type nodes`` indicates that this is a permanent node attribute; ``--type status`` would indicate a transient node attribute. + +.. _special_node_attributes: + Special node attributes ####################### @@ -154,35 +157,26 @@ unset to be false, and anything else to be an error. | | ``crm_resource --cleanup`` commands rather | | | than directly. 
| +----------------------------+-----------------------------------------------------+ - | maintenance | .. index:: | - | | pair: node attribute; maintenance | + | maintenance | .. _node_maintenance: | | | | - | | Similar to the ``maintenance-mode`` | - | | :ref:`cluster option `, but | - | | for a single node. If true, resources will | - | | not be started or stopped on the node, | - | | resources and individual clone instances | - | | running on the node will become unmanaged, | - | | and any recurring operations for those will | - | | be cancelled. | + | | .. index:: | + | | pair: node attribute; maintenance | | | | - | | **Warning:** Restarting pacemaker on a node that is | - | | in single-node maintenance mode will likely | - | | lead to undesirable effects. If | - | | ``maintenance`` is set as a transient | - | | attribute, it will be erased when | - | | Pacemaker is stopped, which will | - | | immediately take the node out of | - | | maintenance mode and likely get it | - | | fenced. Even if permanent, if Pacemaker | - | | is restarted, any resources active on the | - | | node will have their local history erased | - | | when the node rejoins, so the cluster | - | | will no longer consider them running on | - | | the node and thus will consider them | - | | managed again, leading them to be started | - | | elsewhere. This behavior might be | - | | improved in a future release. | + | | If true, the cluster will not start or stop any | + | | resources on this node. Any resources active on the | + | | node become unmanaged, and any recurring operations | + | | for those resources (except those specifying | + | | ``role`` as ``Stopped``) will be paused. The | + | | :ref:`maintenance-mode ` cluster | + | | option, if true, overrides this. If this attribute | + | | is true, it overrides the | + | | :ref:`is-managed ` and | + | | :ref:`maintenance ` | + | | meta-attributes of affected resources and | + | | :ref:`enabled ` meta-attribute for | + | | affected recurring actions. Pacemaker should not be | + | | restarted on a node that is in single-node | + | | maintenance mode. | +----------------------------+-----------------------------------------------------+ | probe_complete | .. index:: | | | pair: node attribute; probe_complete | diff --git a/doc/sphinx/Pacemaker_Explained/operations.rst b/doc/sphinx/Pacemaker_Explained/operations.rst new file mode 100644 index 0000000..b1ad65d --- /dev/null +++ b/doc/sphinx/Pacemaker_Explained/operations.rst @@ -0,0 +1,623 @@ +.. index:: + single: resource; action + single: resource; operation + +.. _operation: + +Resource Operations +------------------- + +*Operations* are actions the cluster can perform on a resource by calling the +resource agent. Resource agents must support certain common operations such as +start, stop, and monitor, and may implement any others. + +Operations may be explicitly configured for two purposes: to override defaults +for options (such as timeout) that the cluster will use whenever it initiates +the operation, and to run an operation on a recurring basis (for example, to +monitor the resource for failure). + +.. topic:: An OCF resource with a non-default start timeout + + .. code-block:: xml + + + + + + + + + + +Pacemaker identifies operations by a combination of name and interval, so this +combination must be unique for each resource. That is, you should not configure +two operations for the same resource with the same name and interval. + +.. 
_operation_properties: + +Operation Properties +#################### + +Operation properties may be specified directly in the ``op`` element as +XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements. +XML attributes take precedence over ``nvpair`` elements if both are specified. + +.. table:: **Properties of an Operation** + :class: longtable + :widths: 1 2 3 + + +----------------+-----------------------------------+-----------------------------------------------------+ + | Field | Default | Description | + +================+===================================+=====================================================+ + | id | | .. index:: | + | | | single: id; action property | + | | | single: action; property, id | + | | | | + | | | A unique name for the operation. | + +----------------+-----------------------------------+-----------------------------------------------------+ + | name | | .. index:: | + | | | single: name; action property | + | | | single: action; property, name | + | | | | + | | | The action to perform. This can be any action | + | | | supported by the agent; common values include | + | | | ``monitor``, ``start``, and ``stop``. | + +----------------+-----------------------------------+-----------------------------------------------------+ + | interval | 0 | .. index:: | + | | | single: interval; action property | + | | | single: action; property, interval | + | | | | + | | | How frequently (in seconds) to perform the | + | | | operation. A value of 0 means "when needed". | + | | | A positive value defines a *recurring action*, | + | | | which is typically used with | + | | | :ref:`monitor `. | + +----------------+-----------------------------------+-----------------------------------------------------+ + | timeout | | .. index:: | + | | | single: timeout; action property | + | | | single: action; property, timeout | + | | | | + | | | How long to wait before declaring the action | + | | | has failed | + +----------------+-----------------------------------+-----------------------------------------------------+ + | on-fail | Varies by action: | .. index:: | + | | | single: on-fail; action property | + | | * ``stop``: ``fence`` if | single: action; property, on-fail | + | | ``stonith-enabled`` is true | | + | | or ``block`` otherwise | The action to take if this action ever fails. | + | | * ``demote``: ``on-fail`` of the | Allowed values: | + | | ``monitor`` action with | | + | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. | + | | if present, enabled, and | * ``block:`` Don't perform any further operations | + | | configured to a value other | on the resource. | + | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start | + | | otherwise | it elsewhere. | + | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a | + | | | full restart. This is valid only for ``promote`` | + | | | actions, and for ``monitor`` actions with both | + | | | a nonzero ``interval`` and ``role`` set to | + | | | ``Promoted``; for any other action, a | + | | | configuration error will be logged, and the | + | | | default behavior will be used. *(since 2.0.5)* | + | | | * ``restart:`` Stop the resource and start it | + | | | again (possibly on a different node). | + | | | * ``fence:`` STONITH the node on which the | + | | | resource failed. | + | | | * ``standby:`` Move *all* resources away from the | + | | | node on which the resource failed. 
| + +----------------+-----------------------------------+-----------------------------------------------------+ + | enabled | TRUE | .. _op_enabled: | + | | | | + | | | .. index:: | + | | | single: enabled; action property | + | | | single: action; property, enabled | + | | | | + | | | If ``false``, ignore this operation definition. | + | | | This does not suppress all actions of this type, | + | | | but is typically used to pause a recurring monitor. | + | | | This can complement the resource being unmanaged | + | | | (:ref:`is-managed ` set to ``false``), | + | | | which does not stop recurring operations. | + | | | Maintenance mode, which does stop configured this | + | | | monitors, overrides this setting. Allowed values: | + | | | ``true``, ``false``. | + +----------------+-----------------------------------+-----------------------------------------------------+ + | record-pending | TRUE | .. index:: | + | | | single: record-pending; action property | + | | | single: action; property, record-pending | + | | | | + | | | If ``true``, the intention to perform the operation | + | | | is recorded so that GUIs and CLI tools can indicate | + | | | that an operation is in progress. This is best set | + | | | as an *operation default* | + | | | (see :ref:`s-operation-defaults`). Allowed values: | + | | | ``true``, ``false``. | + +----------------+-----------------------------------+-----------------------------------------------------+ + | role | | .. index:: | + | | | single: role; action property | + | | | single: action; property, role | + | | | | + | | | Run the operation only on node(s) that the cluster | + | | | thinks should be in the specified role. This only | + | | | makes sense for recurring ``monitor`` operations. | + | | | Allowed (case-sensitive) values: ``Stopped``, | + | | | ``Started``, and in the case of :ref:`promotable | + | | | clone resources `, | + | | | ``Unpromoted`` and ``Promoted``. | + +----------------+-----------------------------------+-----------------------------------------------------+ + +.. note:: + + When ``on-fail`` is set to ``demote``, recovery from failure by a successful + demote causes the cluster to recalculate whether and where a new instance + should be promoted. The node with the failure is eligible, so if promotion + scores have not changed, it will be promoted again. + + There is no direct equivalent of ``migration-threshold`` for the promoted + role, but the same effect can be achieved with a location constraint using a + :ref:`rule ` with a node attribute expression for the resource's fail + count. + + For example, to immediately ban the promoted role from a node with any + failed promote or promoted instance monitor: + + .. code-block:: xml + + + + + + + + + This example assumes that there is a promotable clone of the ``my_primitive`` + resource (note that the primitive name, not the clone name, is used in the + rule), and that there is a recurring 10-second-interval monitor configured for + the promoted role (fail count attributes specify the interval in + milliseconds). + +.. _s-resource-monitoring: + +Monitoring Resources for Failure +################################ + +When Pacemaker first starts a resource, it runs one-time ``monitor`` operations +(referred to as *probes*) to ensure the resource is running where it's +supposed to be, and not running where it's not supposed to be. (This behavior +can be affected by the ``resource-discovery`` location constraint property.) 
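+For example, a minimal sketch (the resource and node names are illustrative)
+of a location constraint that both bans a resource from a node and disables
+probing of it there via ``resource-discovery``:
+
+.. topic:: A location constraint that also disables probes on the node
+
+   .. code-block:: xml
+
+      <rsc_location id="ban-myrsc-on-node3" rsc="myrsc" node="node3"
+                    score="-INFINITY" resource-discovery="never"/>
+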
+ +Other than those initial probes, Pacemaker will *not* (by default) check that +the resource continues to stay healthy [#]_. You must configure ``monitor`` +operations explicitly to perform these checks. + +.. topic:: An OCF resource with a recurring health check + + .. code-block:: xml + + + + + + + + + + + +By default, a ``monitor`` operation will ensure that the resource is running +where it is supposed to. The ``target-role`` property can be used for further +checking. + +For example, if a resource has one ``monitor`` operation with +``interval=10 role=Started`` and a second ``monitor`` operation with +``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes +it thinks *should* be running the resource, and the second monitor on any nodes +that it thinks *should not* be running the resource (for the truly paranoid, +who want to know when an administrator manually starts a service by mistake). + +.. note:: + + Currently, monitors with ``role=Stopped`` are not implemented for + :ref:`clone ` resources. + + +.. _s-operation-defaults: + +Setting Global Defaults for Operations +###################################### + +You can change the global default values for operation properties +in a given cluster. These are defined in an ``op_defaults`` section +of the CIB's ``configuration`` section, and can be set with +``crm_attribute``. For example, + +.. code-block:: none + + # crm_attribute --type op_defaults --name timeout --update 20s + +would default each operation's ``timeout`` to 20 seconds. If an +operation's definition also includes a value for ``timeout``, then that +value would be used for that operation instead. + +When Implicit Operations Take a Long Time +######################################### + +The cluster will always perform a number of implicit operations: ``start``, +``stop`` and a non-recurring ``monitor`` operation used at startup to check +whether the resource is already active. If one of these is taking too long, +then you can create an entry for them and specify a longer timeout. + +.. topic:: An OCF resource with custom timeouts for its implicit actions + + .. code-block:: xml + + + + + + + + + + + + +Multiple Monitor Operations +########################### + +Provided no two operations (for a single resource) have the same name +and interval, you can have as many ``monitor`` operations as you like. +In this way, you can do a superficial health check every minute and +progressively more intense ones at higher intervals. + +To tell the resource agent what kind of check to perform, you need to +provide each monitor with a different value for a common parameter. +The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL`` +for this purpose and dictates that it is "made available to the +resource agent without the normal ``OCF_RESKEY`` prefix". + +Whatever name you choose, you can specify it by adding an +``instance_attributes`` block to the ``op`` tag. It is up to each +resource agent to look for the parameter and decide how to use it. + +.. topic:: An OCF resource with two recurring health checks, performing + different levels of checks specified via ``OCF_CHECK_LEVEL``. + + .. code-block:: xml + + + + + + + + + + + + + + + + + + + +Disabling a Monitor Operation +############################# + +The easiest way to stop a recurring monitor is to just delete it. +However, there can be times when you only want to disable it +temporarily. In such cases, simply add ``enabled=false`` to the +operation's definition. + +.. 
topic:: Example of an OCF resource with a disabled health check + + .. code-block:: xml + + + + + + + + + + +This can be achieved from the command line by executing: + +.. code-block:: none + + # cibadmin --modify --xml-text '' + +Once you've done whatever you needed to do, you can then re-enable it with + +.. code-block:: none + + # cibadmin --modify --xml-text '' + + +.. index:: + single: start-delay; operation attribute + single: interval-origin; operation attribute + single: interval; interval-origin + single: operation; interval-origin + single: operation; start-delay + +Specifying When Recurring Actions are Performed +############################################### + +By default, recurring actions are scheduled relative to when the resource +started. In some cases, you might prefer that a recurring action start relative +to a specific date and time. For example, you might schedule an in-depth +monitor to run once every 24 hours, and want it to run outside business hours. + +To do this, set the operation's ``interval-origin``. The cluster uses this point +to calculate the correct ``start-delay`` such that the operation will occur +at ``interval-origin`` plus a multiple of the operation interval. + +For example, if the recurring operation's interval is 24h, its +``interval-origin`` is set to 02:00, and it is currently 14:32, then the +cluster would initiate the operation after 11 hours and 28 minutes. + +The value specified for ``interval`` and ``interval-origin`` can be any +date/time conforming to the +`ISO8601 standard `_. By way of +example, to specify an operation that would run on the first Monday of +2021 and every Monday after that, you would add: + +.. topic:: Example recurring action that runs relative to base date/time + + .. code-block:: xml + + + + +.. index:: + single: resource; failure recovery + single: operation; failure recovery + +.. _failure-handling: + +Handling Resource Failure +######################### + +By default, Pacemaker will attempt to recover failed resources by restarting +them. However, failure recovery is highly configurable. + +.. index:: + single: resource; failure count + single: operation; failure count + +Failure Counts +______________ + +Pacemaker tracks resource failures for each combination of node, resource, and +operation (start, stop, monitor, etc.). + +You can query the fail count for a particular node, resource, and/or operation +using the ``crm_failcount`` command. For example, to see how many times the +10-second monitor for ``myrsc`` has failed on ``node1``, run: + +.. code-block:: none + + # crm_failcount --query -r myrsc -N node1 -n monitor -I 10s + +If you omit the node, ``crm_failcount`` will use the local node. If you omit +the operation and interval, ``crm_failcount`` will display the sum of the fail +counts for all operations on the resource. + +You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear +fail counts. For example, to clear the above monitor failures, run: + +.. code-block:: none + + # crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s + +If you omit the resource, ``crm_resource --cleanup`` will clear failures for +all resources. If you omit the node, it will clear failures on all nodes. If +you omit the operation and interval, it will clear the failures for all +operations on the resource. + +.. note:: + + Even when cleaning up only a single operation, all failed operations will + disappear from the status display. This allows us to trigger a re-check of + the resource's current status. 
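+As a quick check (reusing the illustrative ``myrsc`` and ``node1`` names from
+the examples above), the fail count can be re-queried after a cleanup to
+confirm that the failure history was cleared:
+
+.. code-block:: none
+
+   # crm_resource --cleanup -r myrsc -N node1
+   # crm_failcount --query -r myrsc -N node1
+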
+ +Higher-level tools may provide other commands for querying and clearing +fail counts. + +The ``crm_mon`` tool shows the current cluster status, including any failed +operations. To see the current fail counts for any failed resources, call +``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per +resource (that is, the sum of any operation fail counts for the resource). + +.. index:: + single: migration-threshold; resource meta-attribute + single: resource; migration-threshold + +Failure Response +________________ + +Normally, if a running resource fails, pacemaker will try to stop it and start +it again. Pacemaker will choose the best location to start it each time, which +may be the same node that it failed on. + +However, if a resource fails repeatedly, it is possible that there is an +underlying problem on that node, and you might desire trying a different node +in such a case. Pacemaker allows you to set your preference via the +``migration-threshold`` resource meta-attribute. [#]_ + +If you define ``migration-threshold`` to *N* for a resource, it will be banned +from the original node after *N* failures there. + +.. note:: + + The ``migration-threshold`` is per *resource*, even though fail counts are + tracked per *operation*. The operation fail counts are added together + to compare against the ``migration-threshold``. + +By default, fail counts remain until manually cleared by an administrator +using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after +first fixing the failure's cause). It is possible to have fail counts expire +automatically by setting the ``failure-timeout`` resource meta-attribute. + +.. important:: + + A successful operation does not clear past failures. If a recurring monitor + operation fails once, succeeds many times, then fails again days later, its + fail count is 2. Fail counts are cleared only by manual intervention or + failure timeout. + +For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to +``60s`` would cause the resource to move to a new node after 2 failures, and +allow it to move back (depending on stickiness and constraint scores) after one +minute. + +.. note:: + + ``failure-timeout`` is measured since the most recent failure. That is, older + failures do not individually time out and lower the fail count. Instead, all + failures are timed out simultaneously (and the fail count is reset to 0) if + there is no new failure for the timeout period. + +There are two exceptions to the migration threshold: when a resource either +fails to start or fails to stop. + +If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is +the default), start failures cause the fail count to be set to ``INFINITY`` and +thus always cause the resource to move immediately. + +Stop failures are slightly different and crucial. If a resource fails to stop +and fencing is enabled, then the cluster will fence the node in order to be +able to start the resource elsewhere. If fencing is disabled, then the cluster +has no way to continue and will not try to start the resource elsewhere, but +will try to stop it again after any failure timeout or clearing. + + +.. index:: + single: reload + single: reload-agent + +Reloading an Agent After a Definition Change +############################################ + +The cluster automatically detects changes to the configuration of active +resources. 
The cluster's normal response is to stop the service (using the old +definition) and start it again (with the new definition). This works, but some +resource agents are smarter and can be told to use a new set of options without +restarting. + +To take advantage of this capability, the resource agent must: + +* Implement the ``reload-agent`` action. What it should do depends completely + on your application! + + .. note:: + + Resource agents may also implement a ``reload`` action to make the managed + service reload its own *native* configuration. This is different from + ``reload-agent``, which makes effective changes in the resource's + *Pacemaker* configuration (specifically, the values of the agent's + reloadable parameters). + +* Advertise the ``reload-agent`` operation in the ``actions`` section of its + meta-data. + +* Set the ``reloadable`` attribute to 1 in the ``parameters`` section of + its meta-data for any parameters eligible to be reloaded after a change. + +Once these requirements are satisfied, the cluster will automatically know to +reload the resource (instead of restarting) when a reloadable parameter +changes. + +.. note:: + + Metadata will not be re-read unless the resource needs to be started. If you + edit the agent of an already active resource to set a parameter reloadable, + the resource may restart the first time the parameter value changes. + +.. note:: + + If both a reloadable and non-reloadable parameter are changed + simultaneously, the resource will be restarted. + + + +.. _live-migration: + +Migrating Resources +################### + +Normally, when the cluster needs to move a resource, it fully restarts the +resource (that is, it stops the resource on the current node and starts it on +the new node). + +However, some types of resources, such as many virtual machines, are able to +move to another location without loss of state (often referred to as live +migration or hot migration). In pacemaker, this is called live migration. +Pacemaker can be configured to migrate a resource when moving it, rather than +restarting it. + +Not all resources are able to migrate; see the +:ref:`migration checklist ` below. Even those that can, +won't do so in all situations. Conceptually, there are two requirements from +which the other prerequisites follow: + +* The resource must be active and healthy at the old location; and +* everything required for the resource to run must be available on both the old + and new locations. + +The cluster is able to accommodate both *push* and *pull* migration models by +requiring the resource agent to support two special actions: ``migrate_to`` +(performed on the current location) and ``migrate_from`` (performed on the +destination). + +In push migration, the process on the current location transfers the resource +to the new location where is it later activated. In this scenario, most of the +work would be done in the ``migrate_to`` action and, if anything, the +activation would occur during ``migrate_from``. + +Conversely for pull, the ``migrate_to`` action is practically empty and +``migrate_from`` does most of the work, extracting the relevant resource state +from the old location and activating it. + +There is no wrong or right way for a resource agent to implement migration, as +long as it works. + +.. _migration_checklist: + +.. topic:: Migration Checklist + + * The resource may not be a clone. + * The resource agent standard must be OCF. + * The resource must not be in a failed or degraded state. 
+ * The resource agent must support ``migrate_to`` and ``migrate_from`` + actions, and advertise them in its meta-data. + * The resource must have the ``allow-migrate`` meta-attribute set to + ``true`` (which is not the default). + +If an otherwise migratable resource depends on another resource via an ordering +constraint, there are special situations in which it will be restarted rather +than migrated. + +For example, if the resource depends on a clone, and at the time the resource +needs to be moved, the clone has instances that are stopping and instances that +are starting, then the resource will be restarted. The scheduler is not yet +able to model this situation correctly and so takes the safer (if less optimal) +path. + +Also, if a migratable resource depends on a non-migratable resource, and both +need to be moved, the migratable resource will be restarted. +.. rubric:: Footnotes + +.. [#] Currently, anyway. Automatic monitoring operations may be added in a future + version of Pacemaker. + +.. [#] The naming of this option was perhaps unfortunate as it is easily + confused with live migration, the process of moving a resource from one + node to another without stopping it. Xen virtual guests are the most + common example of resources that can be migrated in this manner. diff --git a/doc/sphinx/Pacemaker_Explained/options.rst b/doc/sphinx/Pacemaker_Explained/options.rst deleted file mode 100644 index ee0511c..0000000 --- a/doc/sphinx/Pacemaker_Explained/options.rst +++ /dev/null @@ -1,622 +0,0 @@ -Cluster-Wide Configuration --------------------------- - -.. index:: - pair: XML element; cib - pair: XML element; configuration - -Configuration Layout -#################### - -The cluster is defined by the Cluster Information Base (CIB), which uses XML -notation. The simplest CIB, an empty one, looks like this: - -.. topic:: An empty configuration - - .. code-block:: xml - - - - - - - - - - - -The empty configuration above contains the major sections that make up a CIB: - -* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain - fundamental settings are defined as attributes of this element. - - * ``configuration``: This section -- the primary focus of this document -- - contains traditional configuration information such as what resources the - cluster serves and the relationships among them. - - * ``crm_config``: cluster-wide configuration options - - * ``nodes``: the machines that host the cluster - - * ``resources``: the services run by the cluster - - * ``constraints``: indications of how resources should be placed - - * ``status``: This section contains the history of each resource on each - node. Based on this data, the cluster can construct the complete current - state of the cluster. The authoritative source for this section is the - local executor (pacemaker-execd process) on each cluster node, and the - cluster will occasionally repopulate the entire section. For this reason, - it is never written to disk, and administrators are advised against - modifying it in any way. - -In this document, configuration settings will be described as properties or -options based on how they are defined in the CIB: - -* Properties are XML attributes of an XML element. - -* Options are name-value pairs expressed as ``nvpair`` child elements of an XML - element. - -Normally, you will use command-line tools that abstract the XML, so the -distinction will be unimportant; both properties and options are cluster -settings you can tweak. 
- -CIB Properties -############## - -Certain settings are defined by CIB properties (that is, attributes of the -``cib`` tag) rather than with the rest of the cluster configuration in the -``configuration`` section. - -The reason is simply a matter of parsing. These options are used by the -configuration database which is, by design, mostly ignorant of the content it -holds. So the decision was made to place them in an easy-to-find location. - -.. table:: **CIB Properties** - :class: longtable - :widths: 1 3 - - +------------------+-----------------------------------------------------------+ - | Attribute | Description | - +==================+===========================================================+ - | admin_epoch | .. index:: | - | | pair: admin_epoch; cib | - | | | - | | When a node joins the cluster, the cluster performs a | - | | check to see which node has the best configuration. It | - | | asks the node with the highest (``admin_epoch``, | - | | ``epoch``, ``num_updates``) tuple to replace the | - | | configuration on all the nodes -- which makes setting | - | | them, and setting them correctly, very important. | - | | ``admin_epoch`` is never modified by the cluster; you can | - | | use this to make the configurations on any inactive nodes | - | | obsolete. | - | | | - | | **Warning:** Never set this value to zero. In such cases, | - | | the cluster cannot tell the difference between your | - | | configuration and the "empty" one used when nothing is | - | | found on disk. | - +------------------+-----------------------------------------------------------+ - | epoch | .. index:: | - | | pair: epoch; cib | - | | | - | | The cluster increments this every time the configuration | - | | is updated (usually by the administrator). | - +------------------+-----------------------------------------------------------+ - | num_updates | .. index:: | - | | pair: num_updates; cib | - | | | - | | The cluster increments this every time the configuration | - | | or status is updated (usually by the cluster) and resets | - | | it to 0 when epoch changes. | - +------------------+-----------------------------------------------------------+ - | validate-with | .. index:: | - | | pair: validate-with; cib | - | | | - | | Determines the type of XML validation that will be done | - | | on the configuration. If set to ``none``, the cluster | - | | will not verify that updates conform to the DTD (nor | - | | reject ones that don't). | - +------------------+-----------------------------------------------------------+ - | cib-last-written | .. index:: | - | | pair: cib-last-written; cib | - | | | - | | Indicates when the configuration was last written to | - | | disk. Maintained by the cluster; for informational | - | | purposes only. | - +------------------+-----------------------------------------------------------+ - | have-quorum | .. index:: | - | | pair: have-quorum; cib | - | | | - | | Indicates if the cluster has quorum. If false, this may | - | | mean that the cluster cannot start resources or fence | - | | other nodes (see ``no-quorum-policy`` below). Maintained | - | | by the cluster. | - +------------------+-----------------------------------------------------------+ - | dc-uuid | .. index:: | - | | pair: dc-uuid; cib | - | | | - | | Indicates which cluster node is the current leader. Used | - | | by the cluster when placing resources and determining the | - | | order of some events. Maintained by the cluster. 
| - +------------------+-----------------------------------------------------------+ - -.. _cluster_options: - -Cluster Options -############### - -Cluster options, as you might expect, control how the cluster behaves when -confronted with various situations. - -They are grouped into sets within the ``crm_config`` section. In advanced -configurations, there may be more than one set. (This will be described later -in the chapter on :ref:`rules` where we will show how to have the cluster use -different sets of options during working hours than during weekends.) For now, -we will describe the simple case where each option is present at most once. - -You can obtain an up-to-date list of cluster options, including their default -values, by running the ``man pacemaker-schedulerd`` and -``man pacemaker-controld`` commands. - -.. table:: **Cluster Options** - :class: longtable - :widths: 2 1 4 - - +---------------------------+---------+----------------------------------------------------+ - | Option | Default | Description | - +===========================+=========+====================================================+ - | cluster-name | | .. index:: | - | | | pair: cluster option; cluster-name | - | | | | - | | | An (optional) name for the cluster as a whole. | - | | | This is mostly for users' convenience for use | - | | | as desired in administration, but this can be | - | | | used in the Pacemaker configuration in | - | | | :ref:`rules` (as the ``#cluster-name`` | - | | | :ref:`node attribute | - | | | `. It may | - | | | also be used by higher-level tools when | - | | | displaying cluster information, and by | - | | | certain resource agents (for example, the | - | | | ``ocf:heartbeat:GFS2`` agent stores the | - | | | cluster name in filesystem meta-data). | - +---------------------------+---------+----------------------------------------------------+ - | dc-version | | .. index:: | - | | | pair: cluster option; dc-version | - | | | | - | | | Version of Pacemaker on the cluster's DC. | - | | | Determined automatically by the cluster. Often | - | | | includes the hash which identifies the exact | - | | | Git changeset it was built from. Used for | - | | | diagnostic purposes. | - +---------------------------+---------+----------------------------------------------------+ - | cluster-infrastructure | | .. index:: | - | | | pair: cluster option; cluster-infrastructure | - | | | | - | | | The messaging stack on which Pacemaker is | - | | | currently running. Determined automatically by | - | | | the cluster. Used for informational and | - | | | diagnostic purposes. | - +---------------------------+---------+----------------------------------------------------+ - | no-quorum-policy | stop | .. index:: | - | | | pair: cluster option; no-quorum-policy | - | | | | - | | | What to do when the cluster does not have | - | | | quorum. Allowed values: | - | | | | - | | | * ``ignore:`` continue all resource management | - | | | * ``freeze:`` continue resource management, but | - | | | don't recover resources from nodes not in the | - | | | affected partition | - | | | * ``stop:`` stop all resources in the affected | - | | | cluster partition | - | | | * ``demote:`` demote promotable resources and | - | | | stop all other resources in the affected | - | | | cluster partition *(since 2.0.5)* | - | | | * ``suicide:`` fence all nodes in the affected | - | | | cluster partition | - +---------------------------+---------+----------------------------------------------------+ - | batch-limit | 0 | .. 
index:: | - | | | pair: cluster option; batch-limit | - | | | | - | | | The maximum number of actions that the cluster | - | | | may execute in parallel across all nodes. The | - | | | "correct" value will depend on the speed and | - | | | load of your network and cluster nodes. If zero, | - | | | the cluster will impose a dynamically calculated | - | | | limit only when any node has high load. If -1, the | - | | | cluster will not impose any limit. | - +---------------------------+---------+----------------------------------------------------+ - | migration-limit | -1 | .. index:: | - | | | pair: cluster option; migration-limit | - | | | | - | | | The number of | - | | | :ref:`live migration ` actions | - | | | that the cluster is allowed to execute in | - | | | parallel on a node. A value of -1 means | - | | | unlimited. | - +---------------------------+---------+----------------------------------------------------+ - | symmetric-cluster | true | .. index:: | - | | | pair: cluster option; symmetric-cluster | - | | | | - | | | Whether resources can run on any node by default | - | | | (if false, a resource is allowed to run on a | - | | | node only if a | - | | | :ref:`location constraint ` | - | | | enables it) | - +---------------------------+---------+----------------------------------------------------+ - | stop-all-resources | false | .. index:: | - | | | pair: cluster option; stop-all-resources | - | | | | - | | | Whether all resources should be disallowed from | - | | | running (can be useful during maintenance) | - +---------------------------+---------+----------------------------------------------------+ - | stop-orphan-resources | true | .. index:: | - | | | pair: cluster option; stop-orphan-resources | - | | | | - | | | Whether resources that have been deleted from | - | | | the configuration should be stopped. This value | - | | | takes precedence over ``is-managed`` (that is, | - | | | even unmanaged resources will be stopped when | - | | | orphaned if this value is ``true`` | - +---------------------------+---------+----------------------------------------------------+ - | stop-orphan-actions | true | .. index:: | - | | | pair: cluster option; stop-orphan-actions | - | | | | - | | | Whether recurring :ref:`operations ` | - | | | that have been deleted from the configuration | - | | | should be cancelled | - +---------------------------+---------+----------------------------------------------------+ - | start-failure-is-fatal | true | .. index:: | - | | | pair: cluster option; start-failure-is-fatal | - | | | | - | | | Whether a failure to start a resource on a | - | | | particular node prevents further start attempts | - | | | on that node? If ``false``, the cluster will | - | | | decide whether the node is still eligible based | - | | | on the resource's current failure count and | - | | | :ref:`migration-threshold `. | - +---------------------------+---------+----------------------------------------------------+ - | enable-startup-probes | true | .. index:: | - | | | pair: cluster option; enable-startup-probes | - | | | | - | | | Whether the cluster should check the | - | | | pre-existing state of resources when the cluster | - | | | starts | - +---------------------------+---------+----------------------------------------------------+ - | maintenance-mode | false | .. 
index:: | - | | | pair: cluster option; maintenance-mode | - | | | | - | | | Whether the cluster should refrain from | - | | | monitoring, starting and stopping resources | - +---------------------------+---------+----------------------------------------------------+ - | stonith-enabled | true | .. index:: | - | | | pair: cluster option; stonith-enabled | - | | | | - | | | Whether the cluster is allowed to fence nodes | - | | | (for example, failed nodes and nodes with | - | | | resources that can't be stopped. | - | | | | - | | | If true, at least one fence device must be | - | | | configured before resources are allowed to run. | - | | | | - | | | If false, unresponsive nodes are immediately | - | | | assumed to be running no resources, and resource | - | | | recovery on online nodes starts without any | - | | | further protection (which can mean *data loss* | - | | | if the unresponsive node still accesses shared | - | | | storage, for example). See also the | - | | | :ref:`requires ` resource | - | | | meta-attribute. | - +---------------------------+---------+----------------------------------------------------+ - | stonith-action | reboot | .. index:: | - | | | pair: cluster option; stonith-action | - | | | | - | | | Action the cluster should send to the fence agent | - | | | when a node must be fenced. Allowed values are | - | | | ``reboot``, ``off``, and (for legacy agents only) | - | | | ``poweroff``. | - +---------------------------+---------+----------------------------------------------------+ - | stonith-timeout | 60s | .. index:: | - | | | pair: cluster option; stonith-timeout | - | | | | - | | | How long to wait for ``on``, ``off``, and | - | | | ``reboot`` fence actions to complete by default. | - +---------------------------+---------+----------------------------------------------------+ - | stonith-max-attempts | 10 | .. index:: | - | | | pair: cluster option; stonith-max-attempts | - | | | | - | | | How many times fencing can fail for a target | - | | | before the cluster will no longer immediately | - | | | re-attempt it. | - +---------------------------+---------+----------------------------------------------------+ - | stonith-watchdog-timeout | 0 | .. index:: | - | | | pair: cluster option; stonith-watchdog-timeout | - | | | | - | | | If nonzero, and the cluster detects | - | | | ``have-watchdog`` as ``true``, then watchdog-based | - | | | self-fencing will be performed via SBD when | - | | | fencing is required, without requiring a fencing | - | | | resource explicitly configured. | - | | | | - | | | If this is set to a positive value, unseen nodes | - | | | are assumed to self-fence within this much time. | - | | | | - | | | **Warning:** It must be ensured that this value is | - | | | larger than the ``SBD_WATCHDOG_TIMEOUT`` | - | | | environment variable on all nodes. Pacemaker | - | | | verifies the settings individually on all nodes | - | | | and prevents startup or shuts down if configured | - | | | wrongly on the fly. It is strongly recommended | - | | | that ``SBD_WATCHDOG_TIMEOUT`` be set to the same | - | | | value on all nodes. | - | | | | - | | | If this is set to a negative value, and | - | | | ``SBD_WATCHDOG_TIMEOUT`` is set, twice that value | - | | | will be used. | - | | | | - | | | **Warning:** In this case, it is essential (and | - | | | currently not verified by pacemaker) that | - | | | ``SBD_WATCHDOG_TIMEOUT`` is set to the same | - | | | value on all nodes. 
| - +---------------------------+---------+----------------------------------------------------+ - | concurrent-fencing | false | .. index:: | - | | | pair: cluster option; concurrent-fencing | - | | | | - | | | Whether the cluster is allowed to initiate | - | | | multiple fence actions concurrently. Fence actions | - | | | initiated externally, such as via the | - | | | ``stonith_admin`` tool or an application such as | - | | | DLM, or by the fencer itself such as recurring | - | | | device monitors and ``status`` and ``list`` | - | | | commands, are not limited by this option. | - +---------------------------+---------+----------------------------------------------------+ - | fence-reaction | stop | .. index:: | - | | | pair: cluster option; fence-reaction | - | | | | - | | | How should a cluster node react if notified of its | - | | | own fencing? A cluster node may receive | - | | | notification of its own fencing if fencing is | - | | | misconfigured, or if fabric fencing is in use that | - | | | doesn't cut cluster communication. Allowed values | - | | | are ``stop`` to attempt to immediately stop | - | | | pacemaker and stay stopped, or ``panic`` to | - | | | attempt to immediately reboot the local node, | - | | | falling back to stop on failure. The default is | - | | | likely to be changed to ``panic`` in a future | - | | | release. *(since 2.0.3)* | - +---------------------------+---------+----------------------------------------------------+ - | priority-fencing-delay | 0 | .. index:: | - | | | pair: cluster option; priority-fencing-delay | - | | | | - | | | Apply this delay to any fencing targeting the lost | - | | | nodes with the highest total resource priority in | - | | | case we don't have the majority of the nodes in | - | | | our cluster partition, so that the more | - | | | significant nodes potentially win any fencing | - | | | match (especially meaningful in a split-brain of a | - | | | 2-node cluster). A promoted resource instance | - | | | takes the resource's priority plus 1 if the | - | | | resource's priority is not 0. Any static or random | - | | | delays introduced by ``pcmk_delay_base`` and | - | | | ``pcmk_delay_max`` configured for the | - | | | corresponding fencing resources will be added to | - | | | this delay. This delay should be significantly | - | | | greater than (safely twice) the maximum delay from | - | | | those parameters. *(since 2.0.4)* | - +---------------------------+---------+----------------------------------------------------+ - | cluster-delay | 60s | .. index:: | - | | | pair: cluster option; cluster-delay | - | | | | - | | | Estimated maximum round-trip delay over the | - | | | network (excluding action execution). If the DC | - | | | requires an action to be executed on another node, | - | | | it will consider the action failed if it does not | - | | | get a response from the other node in this time | - | | | (after considering the action's own timeout). The | - | | | "correct" value will depend on the speed and load | - | | | of your network and cluster nodes. | - +---------------------------+---------+----------------------------------------------------+ - | dc-deadtime | 20s | .. index:: | - | | | pair: cluster option; dc-deadtime | - | | | | - | | | How long to wait for a response from other nodes | - | | | during startup. The "correct" value will depend on | - | | | the speed/load of your network and the type of | - | | | switches used. 
| - +---------------------------+---------+----------------------------------------------------+ - | cluster-ipc-limit | 500 | .. index:: | - | | | pair: cluster option; cluster-ipc-limit | - | | | | - | | | The maximum IPC message backlog before one cluster | - | | | daemon will disconnect another. This is of use in | - | | | large clusters, for which a good value is the | - | | | number of resources in the cluster multiplied by | - | | | the number of nodes. The default of 500 is also | - | | | the minimum. Raise this if you see | - | | | "Evicting client" messages for cluster daemon PIDs | - | | | in the logs. | - +---------------------------+---------+----------------------------------------------------+ - | pe-error-series-max | -1 | .. index:: | - | | | pair: cluster option; pe-error-series-max | - | | | | - | | | The number of scheduler inputs resulting in errors | - | | | to save. Used when reporting problems. A value of | - | | | -1 means unlimited (report all), and 0 means none. | - +---------------------------+---------+----------------------------------------------------+ - | pe-warn-series-max | 5000 | .. index:: | - | | | pair: cluster option; pe-warn-series-max | - | | | | - | | | The number of scheduler inputs resulting in | - | | | warnings to save. Used when reporting problems. A | - | | | value of -1 means unlimited (report all), and 0 | - | | | means none. | - +---------------------------+---------+----------------------------------------------------+ - | pe-input-series-max | 4000 | .. index:: | - | | | pair: cluster option; pe-input-series-max | - | | | | - | | | The number of "normal" scheduler inputs to save. | - | | | Used when reporting problems. A value of -1 means | - | | | unlimited (report all), and 0 means none. | - +---------------------------+---------+----------------------------------------------------+ - | enable-acl | false | .. index:: | - | | | pair: cluster option; enable-acl | - | | | | - | | | Whether :ref:`acl` should be used to authorize | - | | | modifications to the CIB | - +---------------------------+---------+----------------------------------------------------+ - | placement-strategy | default | .. index:: | - | | | pair: cluster option; placement-strategy | - | | | | - | | | How the cluster should allocate resources to nodes | - | | | (see :ref:`utilization`). Allowed values are | - | | | ``default``, ``utilization``, ``balanced``, and | - | | | ``minimal``. | - +---------------------------+---------+----------------------------------------------------+ - | node-health-strategy | none | .. index:: | - | | | pair: cluster option; node-health-strategy | - | | | | - | | | How the cluster should react to node health | - | | | attributes (see :ref:`node-health`). Allowed values| - | | | are ``none``, ``migrate-on-red``, ``only-green``, | - | | | ``progressive``, and ``custom``. | - +---------------------------+---------+----------------------------------------------------+ - | node-health-base | 0 | .. index:: | - | | | pair: cluster option; node-health-base | - | | | | - | | | The base health score assigned to a node. Only | - | | | used when ``node-health-strategy`` is | - | | | ``progressive``. | - +---------------------------+---------+----------------------------------------------------+ - | node-health-green | 0 | .. index:: | - | | | pair: cluster option; node-health-green | - | | | | - | | | The score to use for a node health attribute whose | - | | | value is ``green``. 
Only used when | - | | | ``node-health-strategy`` is ``progressive`` or | - | | | ``custom``. | - +---------------------------+---------+----------------------------------------------------+ - | node-health-yellow | 0 | .. index:: | - | | | pair: cluster option; node-health-yellow | - | | | | - | | | The score to use for a node health attribute whose | - | | | value is ``yellow``. Only used when | - | | | ``node-health-strategy`` is ``progressive`` or | - | | | ``custom``. | - +---------------------------+---------+----------------------------------------------------+ - | node-health-red | 0 | .. index:: | - | | | pair: cluster option; node-health-red | - | | | | - | | | The score to use for a node health attribute whose | - | | | value is ``red``. Only used when | - | | | ``node-health-strategy`` is ``progressive`` or | - | | | ``custom``. | - +---------------------------+---------+----------------------------------------------------+ - | cluster-recheck-interval | 15min | .. index:: | - | | | pair: cluster option; cluster-recheck-interval | - | | | | - | | | Pacemaker is primarily event-driven, and looks | - | | | ahead to know when to recheck the cluster for | - | | | failure timeouts and most time-based rules | - | | | *(since 2.0.3)*. However, it will also recheck the | - | | | cluster after this amount of inactivity. This has | - | | | two goals: rules with ``date_spec`` are only | - | | | guaranteed to be checked this often, and it also | - | | | serves as a fail-safe for some kinds of scheduler | - | | | bugs. A value of 0 disables this polling; positive | - | | | values are a time interval. | - +---------------------------+---------+----------------------------------------------------+ - | shutdown-lock | false | .. index:: | - | | | pair: cluster option; shutdown-lock | - | | | | - | | | The default of false allows active resources to be | - | | | recovered elsewhere when their node is cleanly | - | | | shut down, which is what the vast majority of | - | | | users will want. However, some users prefer to | - | | | make resources highly available only for failures, | - | | | with no recovery for clean shutdowns. If this | - | | | option is true, resources active on a node when it | - | | | is cleanly shut down are kept "locked" to that | - | | | node (not allowed to run elsewhere) until they | - | | | start again on that node after it rejoins (or for | - | | | at most ``shutdown-lock-limit``, if set). Stonith | - | | | resources and Pacemaker Remote connections are | - | | | never locked. Clone and bundle instances and the | - | | | promoted role of promotable clones are currently | - | | | never locked, though support could be added in a | - | | | future release. Locks may be manually cleared | - | | | using the ``--refresh`` option of ``crm_resource`` | - | | | (both the resource and node must be specified; | - | | | this works with remote nodes if their connection | - | | | resource's ``target-role`` is set to ``Stopped``, | - | | | but not if Pacemaker Remote is stopped on the | - | | | remote node without disabling the connection | - | | | resource). *(since 2.0.4)* | - +---------------------------+---------+----------------------------------------------------+ - | shutdown-lock-limit | 0 | .. 
index:: | - | | | pair: cluster option; shutdown-lock-limit | - | | | | - | | | If ``shutdown-lock`` is true, and this is set to a | - | | | nonzero time duration, locked resources will be | - | | | allowed to start after this much time has passed | - | | | since the node shutdown was initiated, even if the | - | | | node has not rejoined. (This works with remote | - | | | nodes only if their connection resource's | - | | | ``target-role`` is set to ``Stopped``.) | - | | | *(since 2.0.4)* | - +---------------------------+---------+----------------------------------------------------+ - | remove-after-stop | false | .. index:: | - | | | pair: cluster option; remove-after-stop | - | | | | - | | | *Deprecated* Should the cluster remove | - | | | resources from Pacemaker's executor after they are | - | | | stopped? Values other than the default are, at | - | | | best, poorly tested and potentially dangerous. | - | | | This option is deprecated and will be removed in a | - | | | future release. | - +---------------------------+---------+----------------------------------------------------+ - | startup-fencing | true | .. index:: | - | | | pair: cluster option; startup-fencing | - | | | | - | | | *Advanced Use Only:* Should the cluster fence | - | | | unseen nodes at start-up? Setting this to false is | - | | | unsafe, because the unseen nodes could be active | - | | | and running resources but unreachable. | - +---------------------------+---------+----------------------------------------------------+ - | election-timeout | 2min | .. index:: | - | | | pair: cluster option; election-timeout | - | | | | - | | | *Advanced Use Only:* If you need to adjust this | - | | | value, it probably indicates the presence of a bug.| - +---------------------------+---------+----------------------------------------------------+ - | shutdown-escalation | 20min | .. index:: | - | | | pair: cluster option; shutdown-escalation | - | | | | - | | | *Advanced Use Only:* If you need to adjust this | - | | | value, it probably indicates the presence of a bug.| - +---------------------------+---------+----------------------------------------------------+ - | join-integration-timeout | 3min | .. index:: | - | | | pair: cluster option; join-integration-timeout | - | | | | - | | | *Advanced Use Only:* If you need to adjust this | - | | | value, it probably indicates the presence of a bug.| - +---------------------------+---------+----------------------------------------------------+ - | join-finalization-timeout | 30min | .. index:: | - | | | pair: cluster option; join-finalization-timeout | - | | | | - | | | *Advanced Use Only:* If you need to adjust this | - | | | value, it probably indicates the presence of a bug.| - +---------------------------+---------+----------------------------------------------------+ - | transition-delay | 0s | .. index:: | - | | | pair: cluster option; transition-delay | - | | | | - | | | *Advanced Use Only:* Delay cluster recovery for | - | | | the configured interval to allow for additional or | - | | | related events to occur. This can be useful if | - | | | your configuration is sensitive to the order in | - | | | which ping updates arrive. Enabling this option | - | | | will slow down cluster recovery under all | - | | | conditions. 
| - +---------------------------+---------+----------------------------------------------------+ diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst index 3b7520f..a971c44 100644 --- a/doc/sphinx/Pacemaker_Explained/resources.rst +++ b/doc/sphinx/Pacemaker_Explained/resources.rst @@ -362,8 +362,8 @@ behave and can be easily set using the ``--meta`` option of the | | | all :ref:`colocation constraints | | | | ` involving this resource, | | | | as well as the implicit colocation constraints | - | | | created if this resource is in a :ref:`group | - | | | `. For details, see | + | | | created if this resource is in a | + | | | :ref:`group `. For details, see | | | | :ref:`s-coloc-influence`. *(since 2.1.0)* | +----------------------------+----------------------------------+------------------------------------------------------+ | target-role | Started | .. index:: | @@ -375,31 +375,39 @@ behave and can be easily set using the ``--meta`` option of the | | | | | | | * ``Stopped:`` Force the resource to be stopped | | | | * ``Started:`` Allow the resource to be started | - | | | (and in the case of :ref:`promotable clone | - | | | resources `, promoted | - | | | if appropriate) | + | | | (and in the case of | + | | | :ref:`promotable ` clone | + | | | resources, promoted if appropriate) | | | | * ``Unpromoted:`` Allow the resource to be started, | | | | but only in the unpromoted role if the resource is | | | | :ref:`promotable ` | | | | * ``Promoted:`` Equivalent to ``Started`` | +----------------------------+----------------------------------+------------------------------------------------------+ - | is-managed | TRUE | .. index:: | + | is-managed | TRUE | .. _is_managed: | + | | | | + | | | .. index:: | | | | single: is-managed; resource option | | | | single: resource; option, is-managed | | | | | - | | | Is the cluster allowed to start and stop | - | | | the resource? Allowed values: ``true``, ``false`` | + | | | If false, the cluster will not start or stop the | + | | | resource on any node. Recurring actions for the | + | | | resource are unaffected. Maintenance mode overrides | + | | | this setting. Allowed values: ``true``, ``false`` | +----------------------------+----------------------------------+------------------------------------------------------+ - | maintenance | FALSE | .. index:: | + | maintenance | FALSE | .. _rsc_maintenance: | + | | | | + | | | .. index:: | | | | single: maintenance; resource option | | | | single: resource; option, maintenance | | | | | - | | | Similar to the ``maintenance-mode`` | - | | | :ref:`cluster option `, but for | - | | | a single resource. If true, the resource will not | - | | | be started, stopped, or monitored on any node. This | - | | | differs from ``is-managed`` in that monitors will | - | | | not be run. Allowed values: ``true``, ``false`` | + | | | If true, the cluster will not start or stop the | + | | | resource on any node, and will pause any recurring | + | | | monitors (except those specifying ``role`` as | + | | | ``Stopped``). If true, the | + | | | :ref:`maintenance-mode ` cluster | + | | | option or :ref:`maintenance ` | + | | | node attribute override this. Allowed values: | + | | | ``true``, ``false`` | +----------------------------+----------------------------------+------------------------------------------------------+ | resource-stickiness | 1 for individual clone | .. 
_resource-stickiness: | | | instances, 0 for all | | @@ -686,389 +694,3 @@ attributes, their purpose and default values. - -.. index:: - single: resource; action - single: resource; operation - -.. _operation: - -Resource Operations -################### - -*Operations* are actions the cluster can perform on a resource by calling the -resource agent. Resource agents must support certain common operations such as -start, stop, and monitor, and may implement any others. - -Operations may be explicitly configured for two purposes: to override defaults -for options (such as timeout) that the cluster will use whenever it initiates -the operation, and to run an operation on a recurring basis (for example, to -monitor the resource for failure). - -.. topic:: An OCF resource with a non-default start timeout - - .. code-block:: xml - - - - - - - - - - -Pacemaker identifies operations by a combination of name and interval, so this -combination must be unique for each resource. That is, you should not configure -two operations for the same resource with the same name and interval. - -.. _operation_properties: - -Operation Properties -____________________ - -Operation properties may be specified directly in the ``op`` element as -XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements. -XML attributes take precedence over ``nvpair`` elements if both are specified. - -.. table:: **Properties of an Operation** - :class: longtable - :widths: 1 2 3 - - +----------------+-----------------------------------+-----------------------------------------------------+ - | Field | Default | Description | - +================+===================================+=====================================================+ - | id | | .. index:: | - | | | single: id; action property | - | | | single: action; property, id | - | | | | - | | | A unique name for the operation. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | name | | .. index:: | - | | | single: name; action property | - | | | single: action; property, name | - | | | | - | | | The action to perform. This can be any action | - | | | supported by the agent; common values include | - | | | ``monitor``, ``start``, and ``stop``. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | interval | 0 | .. index:: | - | | | single: interval; action property | - | | | single: action; property, interval | - | | | | - | | | How frequently (in seconds) to perform the | - | | | operation. A value of 0 means "when needed". | - | | | A positive value defines a *recurring action*, | - | | | which is typically used with | - | | | :ref:`monitor `. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | timeout | | .. index:: | - | | | single: timeout; action property | - | | | single: action; property, timeout | - | | | | - | | | How long to wait before declaring the action | - | | | has failed | - +----------------+-----------------------------------+-----------------------------------------------------+ - | on-fail | Varies by action: | .. index:: | - | | | single: on-fail; action property | - | | * ``stop``: ``fence`` if | single: action; property, on-fail | - | | ``stonith-enabled`` is true | | - | | or ``block`` otherwise | The action to take if this action ever fails. 
| - | | * ``demote``: ``on-fail`` of the | Allowed values: | - | | ``monitor`` action with | | - | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. | - | | if present, enabled, and | * ``block:`` Don't perform any further operations | - | | configured to a value other | on the resource. | - | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start | - | | otherwise | it elsewhere. | - | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a | - | | | full restart. This is valid only for ``promote`` | - | | | actions, and for ``monitor`` actions with both | - | | | a nonzero ``interval`` and ``role`` set to | - | | | ``Promoted``; for any other action, a | - | | | configuration error will be logged, and the | - | | | default behavior will be used. *(since 2.0.5)* | - | | | * ``restart:`` Stop the resource and start it | - | | | again (possibly on a different node). | - | | | * ``fence:`` STONITH the node on which the | - | | | resource failed. | - | | | * ``standby:`` Move *all* resources away from the | - | | | node on which the resource failed. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | enabled | TRUE | .. index:: | - | | | single: enabled; action property | - | | | single: action; property, enabled | - | | | | - | | | If ``false``, ignore this operation definition. | - | | | This is typically used to pause a particular | - | | | recurring ``monitor`` operation; for instance, it | - | | | can complement the respective resource being | - | | | unmanaged (``is-managed=false``), as this alone | - | | | will :ref:`not block any configured monitoring | - | | | `. Disabling the operation | - | | | does not suppress all actions of the given type. | - | | | Allowed values: ``true``, ``false``. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | record-pending | TRUE | .. index:: | - | | | single: record-pending; action property | - | | | single: action; property, record-pending | - | | | | - | | | If ``true``, the intention to perform the operation | - | | | is recorded so that GUIs and CLI tools can indicate | - | | | that an operation is in progress. This is best set | - | | | as an *operation default* | - | | | (see :ref:`s-operation-defaults`). Allowed values: | - | | | ``true``, ``false``. | - +----------------+-----------------------------------+-----------------------------------------------------+ - | role | | .. index:: | - | | | single: role; action property | - | | | single: action; property, role | - | | | | - | | | Run the operation only on node(s) that the cluster | - | | | thinks should be in the specified role. This only | - | | | makes sense for recurring ``monitor`` operations. | - | | | Allowed (case-sensitive) values: ``Stopped``, | - | | | ``Started``, and in the case of :ref:`promotable | - | | | clone resources `, | - | | | ``Unpromoted`` and ``Promoted``. | - +----------------+-----------------------------------+-----------------------------------------------------+ - -.. note:: - - When ``on-fail`` is set to ``demote``, recovery from failure by a successful - demote causes the cluster to recalculate whether and where a new instance - should be promoted. The node with the failure is eligible, so if promotion - scores have not changed, it will be promoted again. 
- - There is no direct equivalent of ``migration-threshold`` for the promoted - role, but the same effect can be achieved with a location constraint using a - :ref:`rule ` with a node attribute expression for the resource's fail - count. - - For example, to immediately ban the promoted role from a node with any - failed promote or promoted instance monitor: - - .. code-block:: xml - - - - - - - - - This example assumes that there is a promotable clone of the ``my_primitive`` - resource (note that the primitive name, not the clone name, is used in the - rule), and that there is a recurring 10-second-interval monitor configured for - the promoted role (fail count attributes specify the interval in - milliseconds). - -.. _s-resource-monitoring: - -Monitoring Resources for Failure -________________________________ - -When Pacemaker first starts a resource, it runs one-time ``monitor`` operations -(referred to as *probes*) to ensure the resource is running where it's -supposed to be, and not running where it's not supposed to be. (This behavior -can be affected by the ``resource-discovery`` location constraint property.) - -Other than those initial probes, Pacemaker will *not* (by default) check that -the resource continues to stay healthy [#]_. You must configure ``monitor`` -operations explicitly to perform these checks. - -.. topic:: An OCF resource with a recurring health check - - .. code-block:: xml - - - - - - - - - - - -By default, a ``monitor`` operation will ensure that the resource is running -where it is supposed to. The ``target-role`` property can be used for further -checking. - -For example, if a resource has one ``monitor`` operation with -``interval=10 role=Started`` and a second ``monitor`` operation with -``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes -it thinks *should* be running the resource, and the second monitor on any nodes -that it thinks *should not* be running the resource (for the truly paranoid, -who want to know when an administrator manually starts a service by mistake). - -.. note:: - - Currently, monitors with ``role=Stopped`` are not implemented for - :ref:`clone ` resources. - -.. _s-monitoring-unmanaged: - -Monitoring Resources When Administration is Disabled -____________________________________________________ - -Recurring ``monitor`` operations behave differently under various administrative -settings: - -* When a resource is unmanaged (by setting ``is-managed=false``): No monitors - will be stopped. - - If the unmanaged resource is stopped on a node where the cluster thinks it - should be running, the cluster will detect and report that it is not, but it - will not consider the monitor failed, and will not try to start the resource - until it is managed again. - - Starting the unmanaged resource on a different node is strongly discouraged - and will at least cause the cluster to consider the resource failed, and - may require the resource's ``target-role`` to be set to ``Stopped`` then - ``Started`` to be recovered. - -* When a resource is put into maintenance mode (by setting - ``maintenance=true``): The resource will be marked as unmanaged. (This - overrides ``is-managed=true``.) - - Additionally, all monitor operations will be stopped, except those specifying - ``role`` as ``Stopped`` (which will be newly initiated if appropriate). As - with unmanaged resources in general, starting a resource on a node other than - where the cluster expects it to be will cause problems. 
- -* When a node is put into standby: All resources will be moved away from the - node, and all ``monitor`` operations will be stopped on the node, except those - specifying ``role`` as ``Stopped`` (which will be newly initiated if - appropriate). - -* When a node is put into maintenance mode: All resources that are active on the - node will be marked as in maintenance mode. See above for more details. - -* When the cluster is put into maintenance mode: All resources in the cluster - will be marked as in maintenance mode. See above for more details. - -A resource is in maintenance mode if the cluster, the node where the resource -is active, or the resource itself is configured to be in maintenance mode. If a -resource is in maintenance mode, then it is also unmanaged. However, if a -resource is unmanaged, it is not necessarily in maintenance mode. - -.. _s-operation-defaults: - -Setting Global Defaults for Operations -______________________________________ - -You can change the global default values for operation properties -in a given cluster. These are defined in an ``op_defaults`` section -of the CIB's ``configuration`` section, and can be set with -``crm_attribute``. For example, - -.. code-block:: none - - # crm_attribute --type op_defaults --name timeout --update 20s - -would default each operation's ``timeout`` to 20 seconds. If an -operation's definition also includes a value for ``timeout``, then that -value would be used for that operation instead. - -When Implicit Operations Take a Long Time -_________________________________________ - -The cluster will always perform a number of implicit operations: ``start``, -``stop`` and a non-recurring ``monitor`` operation used at startup to check -whether the resource is already active. If one of these is taking too long, -then you can create an entry for them and specify a longer timeout. - -.. topic:: An OCF resource with custom timeouts for its implicit actions - - .. code-block:: xml - - - - - - - - - - - - -Multiple Monitor Operations -___________________________ - -Provided no two operations (for a single resource) have the same name -and interval, you can have as many ``monitor`` operations as you like. -In this way, you can do a superficial health check every minute and -progressively more intense ones at higher intervals. - -To tell the resource agent what kind of check to perform, you need to -provide each monitor with a different value for a common parameter. -The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL`` -for this purpose and dictates that it is "made available to the -resource agent without the normal ``OCF_RESKEY`` prefix". - -Whatever name you choose, you can specify it by adding an -``instance_attributes`` block to the ``op`` tag. It is up to each -resource agent to look for the parameter and decide how to use it. - -.. topic:: An OCF resource with two recurring health checks, performing - different levels of checks specified via ``OCF_CHECK_LEVEL``. - - .. code-block:: xml - - - - - - - - - - - - - - - - - - - -Disabling a Monitor Operation -_____________________________ - -The easiest way to stop a recurring monitor is to just delete it. -However, there can be times when you only want to disable it -temporarily. In such cases, simply add ``enabled=false`` to the -operation's definition. - -.. topic:: Example of an OCF resource with a disabled health check - - .. code-block:: xml - - - - - - - - - - -This can be achieved from the command line by executing: - -.. 
code-block:: none - - # cibadmin --modify --xml-text '' - -Once you've done whatever you needed to do, you can then re-enable it with - -.. code-block:: none - - # cibadmin --modify --xml-text '' - -.. [#] Currently, anyway. Automatic monitoring operations may be added in a future - version of Pacemaker. diff --git a/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst b/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst index 0f34f84..06c00f0 100644 --- a/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst +++ b/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst @@ -330,6 +330,11 @@ resources. A single configuration element can be listed in any number of tags. +.. important:: + + If listing nodes in a tag, you must list the node's ``id``, not name. + + Using Tags in Constraints and Resource Sets ___________________________________________ diff --git a/doc/sphinx/Pacemaker_Explained/status.rst b/doc/sphinx/Pacemaker_Explained/status.rst index 2d7dd7e..6384eda 100644 --- a/doc/sphinx/Pacemaker_Explained/status.rst +++ b/doc/sphinx/Pacemaker_Explained/status.rst @@ -33,7 +33,7 @@ Users are highly recommended *not* to modify any part of a node's state *directly*. The cluster will periodically regenerate the entire section from authoritative sources, so any changes should be done with the tools appropriate to those sources. - + .. table:: **Authoritative Sources for State Information** :widths: 1 1 @@ -48,9 +48,7 @@ with the tools appropriate to those sources. +----------------------+----------------------+ The fields used in the ``node_state`` objects are named as they are -largely for historical reasons and are rooted in Pacemaker's origins -as the resource manager for the older Heartbeat project. They have remained -unchanged to preserve compatibility with older versions. +largely for historical reasons, to maintain compatibility with older versions. .. table:: **Node Status Fields** :widths: 1 3 @@ -147,8 +145,8 @@ all known resources have been checked for on this machine (``probe_complete``). Operation History ################# -A node's resource history is held in the ``lrm_resources`` tag (a child -of the ``lrm`` tag). The information stored here includes enough +A node's resource history is held in the ``lrm_resources`` element (a child +of the ``lrm`` element). The information stored here includes enough information for the cluster to stop the resource safely if it is removed from the ``configuration`` section. Specifically, the resource's ``id``, ``class``, ``type`` and ``provider`` are stored. @@ -159,11 +157,9 @@ removed from the ``configuration`` section. Specifically, the resource's -Additionally, we store the last job for every combination of -``resource``, ``action`` and ``interval``. The concatenation of the values in -this tuple are used to create the id of the ``lrm_rsc_op`` object. +Additionally, we store history entries for certain actions. -.. table:: **Contents of an lrm_rsc_op job** +.. table:: **Attributes of an lrm_rsc_op element** :class: longtable :widths: 1 3 @@ -174,78 +170,78 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object. | | single: id; action status | | | single: action; status, id | | | | - | | Identifier for the job constructed from the resource's | - | | ``operation`` and ``interval``. | + | | Identifier for the history entry constructed from the | + | | resource ID, action name, and operation interval. | +------------------+----------------------------------------------------------+ | call-id | .. 
index:: | | | single: call-id; action status | | | single: action; status, call-id | | | | - | | The job's ticket number. Used as a sort key to determine | - | | the order in which the jobs were executed. | + | | A node-specific counter used to determine the order in | + | | which actions were executed. | +------------------+----------------------------------------------------------+ | operation | .. index:: | | | single: operation; action status | | | single: action; status, operation | | | | - | | The action the resource agent was invoked with. | + | | The action name the resource agent was invoked with. | +------------------+----------------------------------------------------------+ | interval | .. index:: | | | single: interval; action status | | | single: action; status, interval | | | | | | The frequency, in milliseconds, at which the operation | - | | will be repeated. A one-off job is indicated by 0. | + | | will be repeated. One-time execution is indicated by 0. | +------------------+----------------------------------------------------------+ | op-status | .. index:: | | | single: op-status; action status | | | single: action; status, op-status | | | | - | | The job's status. Generally this will be either 0 (done) | - | | or -1 (pending). Rarely used in favor of ``rc-code``. | + | | The execution status of this action. The meanings of | + | | these codes are internal to Pacemaker. | +------------------+----------------------------------------------------------+ | rc-code | .. index:: | | | single: rc-code; action status | | | single: action; status, rc-code | | | | - | | The job's result. Refer to the *Resource Agents* chapter | - | | of *Pacemaker Administration* for details on what the | - | | values here mean and how they are interpreted. | + | | The resource agent's exit status for this action. Refer | + | | to the *Resource Agents* chapter of | + | | *Pacemaker Administration* for how these values are | + | | interpreted. | +------------------+----------------------------------------------------------+ | last-rc-change | .. index:: | | | single: last-rc-change; action status | | | single: action; status, last-rc-change | | | | | | Machine-local date/time, in seconds since epoch, at | - | | which the job first returned the current value of | + | | which the action first returned the current value of | | | ``rc-code``. For diagnostic purposes. | +------------------+----------------------------------------------------------+ | exec-time | .. index:: | | | single: exec-time; action status | | | single: action; status, exec-time | | | | - | | Time, in milliseconds, that the job was running for. | + | | Time, in milliseconds, that the action was running for. | | | For diagnostic purposes. | +------------------+----------------------------------------------------------+ | queue-time | .. index:: | | | single: queue-time; action status | | | single: action; status, queue-time | | | | - | | Time, in seconds, that the job was queued for in the | + | | Time, in seconds, that the action was queued for in the | | | local executor. For diagnostic purposes. | +------------------+----------------------------------------------------------+ | crm_feature_set | .. index:: | | | single: crm_feature_set; action status | | | single: action; status, crm_feature_set | | | | - | | The version which this job description conforms to. Used | - | | when processing ``op-digest``. | + | | The Pacemaker feature set used to record this entry. 
| +------------------+----------------------------------------------------------+ | transition-key | .. index:: | | | single: transition-key; action status | | | single: action; status, transition-key | | | | - | | A concatenation of the job's graph action number, the | + | | A concatenation of the action's graph action number, the | | | graph number, the expected result and the UUID of the | | | controller instance that scheduled it. This is used to | | | construct ``transition-magic`` (below). | @@ -254,13 +250,13 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object. | | single: transition-magic; action status | | | single: action; status, transition-magic | | | | - | | A concatenation of the job's ``op-status``, ``rc-code`` | + | | A concatenation of ``op-status``, ``rc-code`` | | | and ``transition-key``. Guaranteed to be unique for the | | | life of the cluster (which ensures it is part of CIB | | | update notifications) and contains all the information | | | needed for the controller to correctly analyze and | - | | process the completed job. Most importantly, the | - | | decomposed elements tell the controller if the job | + | | process the completed action. Most importantly, the | + | | decomposed elements tell the controller if the history | | | entry was expected and whether it failed. | +------------------+----------------------------------------------------------+ | op-digest | .. index:: | @@ -268,7 +264,7 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object. | | single: action; status, op-digest | | | | | | An MD5 sum representing the parameters passed to the | - | | job. Used to detect changes to the configuration, to | + | | action. Used to detect changes to the configuration, to | | | restart resources if necessary. | +------------------+----------------------------------------------------------+ | crm-debug-origin | .. index:: | @@ -296,7 +292,7 @@ ________________________________ last-rc-change="1239008085" exec-time="10" queue-time="0"/> -In the above example, the job is a non-recurring monitor operation +In the above example, the action is a non-recurring monitor operation often referred to as a "probe" for the ``apcstonith`` resource. The cluster schedules probes for every configured resource on a node when @@ -308,16 +304,16 @@ the 2nd graph produced by this instance of the controller (2668bbeb-06d5-40f9-936d-24cb7f87006a). The third field of the ``transition-key`` contains a 7, which indicates -that the job expects to find the resource inactive. By looking at the ``rc-code`` -property, we see that this was the case. +that the cluster expects to find the resource inactive. By looking at the +``rc-code`` property, we see that this was the case. -As that is the only job recorded for this node, we can conclude that +As that is the only action recorded for this node, we can conclude that the cluster started the resource elsewhere. Complex Operation History Example _________________________________ -.. topic:: Resource history of a ``pingd`` clone with multiple jobs +.. topic:: Resource history of a ``pingd`` clone with multiple entries .. code-block:: xml @@ -344,7 +340,7 @@ _________________________________ last-rc-change="1239008085" exec-time="20" queue-time="0"/> -When more than one job record exists, it is important to first sort +When more than one history entry exists, it is important to first sort them by ``call-id`` before interpreting them. 
Once sorted, the above example can be summarized as: @@ -354,7 +350,7 @@ Once sorted, the above example can be summarized as: #. A start operation returning 0 (success), with a ``call-id`` of 33 #. A recurring monitor returning 0 (success), with a ``call-id`` of 34 -The cluster processes each job record to build up a picture of the +The cluster processes each history entry to build up a picture of the resource's state. After the first and second entries, it is considered stopped, and after the third it considered active. diff --git a/doc/sphinx/Pacemaker_Explained/utilization.rst b/doc/sphinx/Pacemaker_Explained/utilization.rst index 93c67cd..87eef60 100644 --- a/doc/sphinx/Pacemaker_Explained/utilization.rst +++ b/doc/sphinx/Pacemaker_Explained/utilization.rst @@ -4,19 +4,19 @@ Utilization and Placement Strategy ---------------------------------- Pacemaker decides where to place a resource according to the resource -allocation scores on every node. The resource will be allocated to the +assignment scores on every node. The resource will be assigned to the node where the resource has the highest score. -If the resource allocation scores on all the nodes are equal, by the default +If the resource assignment scores on all the nodes are equal, by the default placement strategy, Pacemaker will choose a node with the least number of -allocated resources for balancing the load. If the number of resources on each +assigned resources for balancing the load. If the number of resources on each node is equal, the first eligible node listed in the CIB will be chosen to run the resource. Often, in real-world situations, different resources use significantly different proportions of a node's capacities (memory, I/O, etc.). We cannot balance the load ideally just according to the number of resources -allocated to a node. Besides, if resources are placed such that their combined +assigned to a node. Besides, if resources are placed such that their combined requirements exceed the provided capacity, they may fail to start completely or run with degraded performance. @@ -119,7 +119,7 @@ Four values are available for the ``placement-strategy``: * **default** Utilization values are not taken into account at all. - Resources are allocated according to allocation scores. If scores are equal, + Resources are assigned according to assignment scores. If scores are equal, resources are evenly distributed across nodes. * **utilization** @@ -127,7 +127,7 @@ Four values are available for the ``placement-strategy``: Utilization values are taken into account *only* when deciding whether a node is considered eligible (i.e. whether it has sufficient free capacity to satisfy the resource's requirements). Load-balancing is still done based on the - number of resources allocated to a node. + number of resources assigned to a node. * **balanced** @@ -152,11 +152,11 @@ Now Pacemaker will ensure the load from your resources will be distributed evenly throughout the cluster, without the need for convoluted sets of colocation constraints. -Allocation Details +Assignment Details ################## -Which node is preferred to get consumed first when allocating resources? -________________________________________________________________________ +Which node is preferred to get consumed first when assigning resources? +_______________________________________________________________________ * The node with the highest node weight gets consumed first. Node weight is a score maintained by the cluster to represent node health. 
@@ -164,18 +164,18 @@ ________________________________________________________________________ * If multiple nodes have the same node weight: * If ``placement-strategy`` is ``default`` or ``utilization``, - the node that has the least number of allocated resources gets consumed first. + the node that has the least number of assigned resources gets consumed first. - * If their numbers of allocated resources are equal, + * If their numbers of assigned resources are equal, the first eligible node listed in the CIB gets consumed first. * If ``placement-strategy`` is ``balanced``, the node that has the most free capacity gets consumed first. * If the free capacities of the nodes are equal, - the node that has the least number of allocated resources gets consumed first. + the node that has the least number of assigned resources gets consumed first. - * If their numbers of allocated resources are equal, + * If their numbers of assigned resources are equal, the first eligible node listed in the CIB gets consumed first. * If ``placement-strategy`` is ``minimal``, @@ -201,17 +201,17 @@ Which resource is preferred to be assigned first? _________________________________________________ * The resource that has the highest ``priority`` (see :ref:`resource_options`) gets - allocated first. + assigned first. * If their priorities are equal, check whether they are already running. The - resource that has the highest score on the node where it's running gets allocated + resource that has the highest score on the node where it's running gets assigned first, to prevent resource shuffling. * If the scores above are equal or the resources are not running, the resource has - the highest score on the preferred node gets allocated first. + the highest score on the preferred node gets assigned first. * If the scores above are equal, the first runnable resource listed in the CIB - gets allocated first. + gets assigned first. Limitations and Workarounds ########################### @@ -233,9 +233,9 @@ services stopped. In the contrived example at the start of this chapter: -* ``rsc-small`` would be allocated to ``node1`` +* ``rsc-small`` would be assigned to ``node1`` -* ``rsc-medium`` would be allocated to ``node2`` +* ``rsc-medium`` would be assigned to ``node2`` * ``rsc-large`` would remain inactive diff --git a/doc/sphinx/Pacemaker_Remote/alternatives.rst b/doc/sphinx/Pacemaker_Remote/alternatives.rst index 83ed67c..adbdc99 100644 --- a/doc/sphinx/Pacemaker_Remote/alternatives.rst +++ b/doc/sphinx/Pacemaker_Remote/alternatives.rst @@ -78,13 +78,8 @@ using virtual machines. Key differences: technology -- for example, the ``libvirt-daemon-lxc`` package to get the `libvirt-lxc `_ driver for LXC containers. -* Libvirt XML definitions must be generated for the containers. The - ``pacemaker-cts`` package includes a script for this purpose, - ``/usr/share/pacemaker/tests/cts/lxc_autogen.sh``. Run it with the - ``--help`` option for details on how to use it. It is intended for testing - purposes only, and hardcodes various parameters that would need to be set - appropriately in real usage. Of course, you can create XML definitions - manually, following the appropriate libvirt driver documentation. +* Libvirt XML definitions must be generated for the containers. You can create + XML definitions manually, following the appropriate libvirt driver documentation. 
* To share the authentication key, either share the host's ``/etc/pacemaker`` directory with the container, or copy the key into the container's diff --git a/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst b/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst index a3c0fbe..7c23bd6 100644 --- a/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst +++ b/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst @@ -109,7 +109,7 @@ Start and enable the ``pcsd`` daemon on the remote node. [root@remote1 ~]# systemctl enable pcsd Created symlink /etc/systemd/system/multi-user.target.wants/pcsd.service → /usr/lib/systemd/system/pcsd.service. -Next, set a password for the ``hacluster`` user on the remote node +Next, set a password for the |CRM_DAEMON_USER| user on the remote node .. code-block:: none diff --git a/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst b/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst index 253149e..ef09882 100644 --- a/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst +++ b/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst @@ -254,7 +254,7 @@ Start and enable the ``pcsd`` daemon on the guest. [root@guest1 ~]# systemctl enable pcsd Created symlink /etc/systemd/system/multi-user.target.wants/pcsd.service → /usr/lib/systemd/system/pcsd.service. -Next, set a password for the ``hacluster`` user on the guest. +Next, set a password for the |CRM_DAEMON_USER| user on the guest. .. code-block:: none diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in index 7d843d8..556eb72 100644 --- a/doc/sphinx/conf.py.in +++ b/doc/sphinx/conf.py.in @@ -30,6 +30,16 @@ doc_license += " version 4.0 or later (CC-BY-SA v4.0+)" rst_prolog=""" .. |CFS_DISTRO| replace:: AlmaLinux .. |CFS_DISTRO_VER| replace:: 9 +.. |CRM_BLACKBOX_DIR| replace:: ``%CRM_BLACKBOX_DIR%`` +.. |CRM_DAEMON_GROUP| replace:: ``%CRM_DAEMON_GROUP%`` +.. |CRM_DAEMON_USER| replace:: ``%CRM_DAEMON_USER%`` +.. |CRM_DAEMON_USER_RAW| replace:: %CRM_DAEMON_USER% +.. |CRM_SCHEMA_DIRECTORY| replace:: %CRM_SCHEMA_DIRECTORY% +.. |PCMK_AUTHKEY_FILE| replace:: %PACEMAKER_CONFIG_DIR%/authkey +.. |PCMK_CONFIG_FILE| replace:: ``%CONFIGDIR%/pacemaker`` +.. |PCMK_INIT_ENV_FILE| replace:: ``%PACEMAKER_CONFIG_DIR%/pcmk-init.env`` +.. |PCMK_LOG_FILE| replace:: %CRM_LOG_DIR%/pacemaker.log +.. |PCMK_GNUTLS_PRIORITIES| replace:: %PCMK_GNUTLS_PRIORITIES% .. |REMOTE_DISTRO| replace:: AlmaLinux .. |REMOTE_DISTRO_VER| replace:: 9 """ diff --git a/etc/Makefile.am b/etc/Makefile.am index b810f82..b90bb50 100644 --- a/etc/Makefile.am +++ b/etc/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2021-2022 the Pacemaker project contributors +# Copyright 2021-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -10,7 +10,8 @@ MAINTAINERCLEANFILES = Makefile.in configdir = @CONFIGDIR@ -CONFIGS = crm_mon pacemaker +CONFIGS = crm_mon \ + pacemaker if !BUILD_SYSTEMD initdir = $(INITDIR) @@ -23,6 +24,7 @@ logrotate_DATA = logrotate.d/pacemaker EXTRA_DIST = $(foreach f,$(CONFIGS),sysconfig/$(f)) # Don't overwrite user's existing config files +.PHONY: install-data-local install-data-local: $(AM_V_at)$(MKDIR_P) $(DESTDIR)$(configdir) $(AM_V_at)for f in $(CONFIGS); do \ @@ -31,6 +33,7 @@ install-data-local: $(INSTALL_DATA) "$(srcdir)/sysconfig/$$f" "$$dest"; \ done +.PHONY: uninstall-local uninstall-local: $(AM_V_at)for f in $(CONFIGS); do \ dest="$(DESTDIR)$(configdir)/$$f"; \ diff --git a/etc/sysconfig/pacemaker.in b/etc/sysconfig/pacemaker.in index 3b03ad6..0c3609d 100644 --- a/etc/sysconfig/pacemaker.in +++ b/etc/sysconfig/pacemaker.in @@ -81,6 +81,17 @@ # Default: PCMK_debug="no" # Example: PCMK_debug="pacemakerd,pacemaker-execd" +# PCMK_stderr (Advanced Use Only) +# +# Whether to send daemon log messages to stderr. This would be useful only +# during troubleshooting, when starting Pacemaker manually on the command line. +# +# Setting this option in this file is pointless, since this file is not read +# when starting Pacemaker manually. However, it can be set directly as an +# environment variable on the command line. +# +# Default: PCMK_stderr="no" + # PCMK_trace_functions (Advanced Use Only) # # Send debug and trace severity messages from these (comma-separated) @@ -137,18 +148,24 @@ # Example: PCMK_trace_blackbox="remote.c:144,remote.c:149" -## Node start state +## Option overrides # PCMK_node_start_state # # By default, the local host will join the cluster in an online or standby # state when Pacemaker first starts depending on whether it was previously put # into standby mode. If this variable is set to "standby" or "online", it will -# force the local host to join in the specified state. This has no effect on -# Pacemaker Remote nodes. +# force the local host to join in the specified state. # # Default: PCMK_node_start_state="default" +# PCMK_node_action_limit +# +# Specify the maximum number of jobs that can be scheduled on this node. If set, +# this overrides the node-action-limit cluster property for this node. +# +# Default: PCMK_node_action_limit="" + ## Crash Handling @@ -179,8 +196,8 @@ # # Use the contents of this file as the authorization key to use with Pacemaker # Remote connections. This file must be readable by Pacemaker daemons (that is, -# it must allow read permissions to either the hacluster user or the haclient -# group), and its contents must be identical on all nodes. +# it must allow read permissions to either the @CRM_DAEMON_USER@ user or the +# @CRM_DAEMON_GROUP@ group), and its contents must be identical on all nodes. # # Default: PCMK_authkey_location="@PACEMAKER_CONFIG_DIR@/authkey" @@ -203,6 +220,30 @@ # # Default: PCMK_remote_port="3121" +# PCMK_remote_pid1 (Advanced Use Only) +# +# When a bundle resource's "run-command" option is left to default, Pacemaker +# Remote runs as PID 1 in the bundle's containers. When it does so, it loads +# environment variables from the container's +# @PACEMAKER_CONFIG_DIR@/pcmk-init.env and performs the PID 1 responsibility of +# reaping dead subprocesses. +# +# This option controls whether those actions are performed when Pacemaker +# Remote is not running as PID 1. It is intended primarily for developer testing +# but can be useful when "run-command" is set to a separate, custom PID 1 +# process that launches Pacemaker Remote. 
+# +# * If set to "full", Pacemaker Remote loads environment variables from +# @PACEMAKER_CONFIG_DIR@/pcmk-init.env and reaps dead subprocesses. +# * If set to "vars", Pacemaker Remote loads environment variables from +# @PACEMAKER_CONFIG_DIR@/pcmk-init.env but does not reap dead subprocesses. +# * If set to "default", Pacemaker Remote performs neither action. +# +# If Pacemaker Remote is running as PID 1, this option is ignored, and the +# behavior is the same as for "full". +# +# Default: PCMK_remote_pid1="default" + # PCMK_tls_priorities (Advanced Use Only) # # These GnuTLS cipher priorities will be used for TLS connections (whether for @@ -235,7 +276,7 @@ # the value must be lowered in order for the client's GnuTLS library to accept # a connection to an older server. # -# Default: PCMK_dh_min_bits="1024" +# Default: PCMK_dh_min_bits="0" (no minimum) # PCMK_dh_max_bits (Advanced Use Only) # @@ -252,7 +293,7 @@ # # Clients do not use PCMK_dh_max_bits. # -# Default: PCMK_dh_max_bits="2048" +# Default: PCMK_dh_max_bits="0" (no maximum) ## Inter-process Communication @@ -277,6 +318,19 @@ # Default: PCMK_ipc_buffer="131072" +## Cluster type + +# PCMK_cluster_type (Advanced Use Only) +# +# Specify the cluster layer to be used. If unset, Pacemaker will detect and use +# a supported cluster layer, if available. Currently, "corosync" is the only +# supported cluster layer. If multiple layers are supported in the future, this +# will allow overriding Pacemaker's automatic detection to select a specific +# one. +# +# Default: PCMK_cluster_type="" + + ## Developer Options # PCMK_schema_directory (Advanced Use Only) diff --git a/include/Makefile.am b/include/Makefile.am index dfd7085..6618c7a 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2003-2019 the Pacemaker project contributors +# Copyright 2003-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -7,14 +7,15 @@ # or later (GPLv2+) WITHOUT ANY WARRANTY. # -MAINTAINERCLEANFILES = Makefile.in config.h.in +MAINTAINERCLEANFILES = Makefile.in \ + config.h.in -noinst_HEADERS = config.h \ - crm_internal.h \ - doxygen.h \ - pacemaker.h \ - pacemaker-internal.h \ - portability.h \ +noinst_HEADERS = config.h \ + crm_internal.h \ + doxygen.h \ + pacemaker.h \ + pacemaker-internal.h \ + portability.h \ gettext.h pkginclude_HEADERS = crm_config.h @@ -24,16 +25,17 @@ SUBDIRS = crm pcmki GETTEXT_H ?= $(datadir)/gettext/gettext.h +.PHONY: update-gettext update-gettext: @if [ ! -e "$(GETTEXT_H)" ]; then \ echo "$(GETTEXT_H) not found"; \ else \ cp "$(GETTEXT_H)" gettext.h; \ - git diff --quiet gettext.h 2>/dev/null; \ + "$(GIT)" diff --quiet gettext.h 2>/dev/null; \ if [ $$? -eq 0 ]; then \ echo "No update needed"; \ else \ - git add gettext.h; \ + "$(GIT)" add gettext.h; \ echo 'Review changes then run:'; \ echo 'git commit -m "Low: NLS: update gettext.h from upstream"'; \ fi \ diff --git a/include/crm/Makefile.am b/include/crm/Makefile.am index 6dd52fd..95564b8 100644 --- a/include/crm/Makefile.am +++ b/include/crm/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2021 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -11,12 +11,23 @@ MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm -header_HEADERS = cib.h cluster.h compatibility.h crm.h \ - lrmd.h msg_xml.h services.h stonith-ng.h \ +header_HEADERS = cib.h \ + cluster.h \ + compatibility.h \ + crm.h \ crm_compat.h \ + lrmd.h \ + lrmd_events.h \ + msg_xml.h \ msg_xml_compat.h \ - services_compat.h + services.h \ + services_compat.h \ + stonith-ng.h -noinst_HEADERS = lrmd_internal.h services_internal.h +noinst_HEADERS = $(wildcard *_internal.h) -SUBDIRS = common pengine cib fencing cluster +SUBDIRS = common \ + pengine \ + cib \ + fencing \ + cluster diff --git a/include/crm/cib/cib_types.h b/include/crm/cib/cib_types.h index 5bd10e4..a803311 100644 --- a/include/crm/cib/cib_types.h +++ b/include/crm/cib/cib_types.h @@ -59,12 +59,54 @@ enum cib_call_options { cib_discard_reply = (1 << 4), cib_no_children = (1 << 5), cib_xpath_address = (1 << 6), + + //! \deprecated This value will be removed in a future release cib_mixed_update = (1 << 7), + + /* @COMPAT: cib_scope_local is processed only in the legacy function + * parse_local_options_v1(). + * + * If (host == NULL): + * * In legacy mode, the CIB manager forwards a request to the primary + * instance unless cib_scope_local is set or the local node is primary. + * * Outside of legacy mode: + * * If a request modifies the CIB, the CIB manager forwards it to all + * nodes. + * * Otherwise, the CIB manager processes the request locally. + * + * There is no current use case for this implementing this flag in + * non-legacy mode. + */ + + //! \deprecated This value will be removed in a future release cib_scope_local = (1 << 8), + cib_dryrun = (1 << 9), + + /*! + * \brief Process request when the client commits the active transaction + * + * Add the request to the client's active transaction instead of processing + * it immediately. If the client has no active transaction, or if the + * request is not supported in transactions, the call will fail. + * + * The request is added to the transaction synchronously, and the return + * value indicates whether it was added successfully. + * + * Refer to \p cib_api_operations_t:init_transaction() and + * \p cib_api_operations_t:end_transaction() for more details on CIB + * transactions. + */ + cib_transaction = (1 << 10), + cib_sync_call = (1 << 12), cib_no_mtime = (1 << 13), + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated This value will be removed in a future release cib_zero_copy = (1 << 14), +#endif // !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + cib_inhibit_notify = (1 << 16), #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) @@ -82,13 +124,19 @@ typedef struct cib_s cib_t; typedef struct cib_api_operations_s { int (*signon) (cib_t *cib, const char *name, enum cib_conn_type type); + + //! \deprecated This method will be removed and should not be used int (*signon_raw) (cib_t *cib, const char *name, enum cib_conn_type type, int *event_fd); + int (*signoff) (cib_t *cib); int (*free) (cib_t *cib); + + //! \deprecated This method will be removed and should not be used int (*set_op_callback) (cib_t *cib, void (*callback) (const xmlNode *msg, int callid, int rc, xmlNode *output)); + int (*add_notify_callback) (cib_t *cib, const char *event, void (*callback) (const char *event, xmlNode *msg)); @@ -97,8 +145,13 @@ typedef struct cib_api_operations_s { xmlNode *msg)); int (*set_connection_dnotify) (cib_t *cib, void (*dnotify) (gpointer user_data)); + + //! 
\deprecated This method will be removed and should not be used int (*inputfd) (cib_t *cib); + + //! \deprecated This method will be removed and should not be used int (*noop) (cib_t *cib, int call_options); + int (*ping) (cib_t *cib, xmlNode **output_data, int call_options); int (*query) (cib_t *cib, const char *section, xmlNode **output_data, int call_options); @@ -141,7 +194,9 @@ typedef struct cib_api_operations_s { int (*delete_absolute) (cib_t *cib, const char *section, xmlNode *data, int call_options); + //! \deprecated This method is not implemented and should not be used int (*quit) (cib_t *cib, int call_options); + int (*register_notification) (cib_t *cib, const char *callback, int enabled); gboolean (*register_callback) (cib_t *cib, int call_id, int timeout, @@ -190,14 +245,85 @@ typedef struct cib_api_operations_s { * * \return Legacy Pacemaker return code * - * \note The client IDs are assigned by \p pacemaker-based when the client - * connects. \p cib_t variants that don't connect to - * \p pacemaker-based may never be assigned a client ID. * \note Some variants may have only one client for both asynchronous and * synchronous requests. */ int (*client_id)(const cib_t *cib, const char **async_id, const char **sync_id); + + /*! + * \brief Initiate an atomic CIB transaction for this client + * + * If the client has initiated a transaction and a new request's call + * options contain \p cib_transaction, the new request is appended to the + * transaction for later processing. + * + * Supported requests are those that meet the following conditions: + * * can be processed synchronously (with any changes applied to a working + * CIB copy) + * * are not queries + * * do not involve other nodes + * * do not affect the state of pacemaker-based itself + * + * Currently supported CIB API functions include: + * * \p bump_epoch() + * * \p create() + * * \p erase() + * * \p modify() + * * \p remove() + * * \p replace() + * * \p upgrade() + * + * Because the transaction is atomic, individual requests do not trigger + * callbacks or notifications when they are processed, and they do not + * receive output XML. The commit request itself can trigger callbacks and + * notifications if any are registered. + * + * An \c init_transaction() call is always synchronous. + * + * \param[in,out] cib CIB connection + * + * \return Legacy Pacemaker return code + */ + int (*init_transaction)(cib_t *cib); + + /*! + * \brief End and optionally commit this client's CIB transaction + * + * When a client commits a transaction, all requests in the transaction are + * processed in a FIFO manner until either a request fails or all requests + * have been processed. Changes are applied to a working copy of the CIB. + * If a request fails, the transaction and working CIB copy are discarded, + * and an error is returned. If all requests succeed, the working CIB copy + * replaces the initial CIB copy. + * + * Callbacks and notifications can be triggered by the commit request itself + * but not by the individual requests in a transaction. + * + * An \c end_transaction() call with \p commit set to \c false is always + * synchronous. + * + * \param[in,out] cib CIB connection + * \param[in] commit If \p true, commit transaction; otherwise, + * discard it + * \param[in] call_options Group of enum cib_call_options + * flags + * + * \return Legacy Pacemaker return code + */ + int (*end_transaction)(cib_t *cib, bool commit, int call_options); + + /*! 
+ * \brief Set the user as whom all CIB requests via methods will be executed + * + * By default, the value of the \c CIB_user environment variable is used if + * set. Otherwise, \c root is used. + * + * \param[in,out] cib CIB connection + * \param[in] user Name of user whose permissions to use when + * processing requests + */ + void (*set_user)(cib_t *cib, const char *user); } cib_api_operations_t; struct cib_s { @@ -211,9 +337,16 @@ struct cib_s { void *delegate_fn; GList *notify_list; + + //! \deprecated This method will be removed in a future release void (*op_callback) (const xmlNode *msg, int call_id, int rc, xmlNode *output); + cib_api_operations_t *cmds; + + xmlNode *transaction; + + char *user; }; #ifdef __cplusplus diff --git a/include/crm/cib/internal.h b/include/crm/cib/internal.h index 374902b..20059ec 100644 --- a/include/crm/cib/internal.h +++ b/include/crm/cib/internal.h @@ -15,7 +15,6 @@ // Request types for CIB manager IPC/CPG #define PCMK__CIB_REQUEST_SECONDARY "cib_slave" -#define PCMK__CIB_REQUEST_ALL_SECONDARY "cib_slave_all" #define PCMK__CIB_REQUEST_PRIMARY "cib_master" #define PCMK__CIB_REQUEST_SYNC_TO_ALL "cib_sync" #define PCMK__CIB_REQUEST_SYNC_TO_ONE "cib_sync_one" @@ -32,6 +31,7 @@ #define PCMK__CIB_REQUEST_ABS_DELETE "cib_delete_alt" #define PCMK__CIB_REQUEST_NOOP "noop" #define PCMK__CIB_REQUEST_SHUTDOWN "cib_shutdown_req" +#define PCMK__CIB_REQUEST_COMMIT_TRANSACT "cib_commit_transact" # define F_CIB_CLIENTID "cib_clientid" # define F_CIB_CALLOPTS "cib_callopt" @@ -60,34 +60,72 @@ # define F_CIB_LOCAL_NOTIFY_ID "cib_local_notify_id" # define F_CIB_PING_ID "cib_ping_id" # define F_CIB_SCHEMA_MAX "cib_schema_max" -# define F_CIB_CHANGE_SECTION "cib_change_section" # define T_CIB "cib" +# define T_CIB_COMMAND "cib_command" # define T_CIB_NOTIFY "cib_notify" /* notify sub-types */ # define T_CIB_PRE_NOTIFY "cib_pre_notify" # define T_CIB_POST_NOTIFY "cib_post_notify" +# define T_CIB_TRANSACTION "cib_transaction" # define T_CIB_UPDATE_CONFIRM "cib_update_confirmation" -# define T_CIB_REPLACE_NOTIFY "cib_refresh_notify" /*! * \internal - * \enum cib_change_section_info - * \brief Flags to indicate which sections of the CIB have changed + * \enum cib__op_attr + * \brief Flags for CIB operation attributes */ -enum cib_change_section_info { - cib_change_section_none = 0, //!< No sections have changed - cib_change_section_nodes = (1 << 0), //!< The nodes section has changed - cib_change_section_alerts = (1 << 1), //!< The alerts section has changed - cib_change_section_status = (1 << 2), //!< The status section has changed +enum cib__op_attr { + cib__op_attr_none = 0, //!< No special attributes + cib__op_attr_modifies = (1 << 1), //!< Modifies CIB + cib__op_attr_privileged = (1 << 2), //!< Requires privileges + cib__op_attr_local = (1 << 3), //!< Must only be processed locally + cib__op_attr_replaces = (1 << 4), //!< Replaces CIB + cib__op_attr_writes_through = (1 << 5), //!< Writes to disk on success + cib__op_attr_transaction = (1 << 6), //!< Supported in a transaction }; +/*! 
+ * \internal + * \enum cib__op_type + * \brief Types of CIB operations + */ +enum cib__op_type { + cib__op_abs_delete, + cib__op_apply_patch, + cib__op_bump, + cib__op_commit_transact, + cib__op_create, + cib__op_delete, + cib__op_erase, + cib__op_is_primary, + cib__op_modify, + cib__op_noop, + cib__op_ping, + cib__op_primary, + cib__op_query, + cib__op_replace, + cib__op_secondary, + cib__op_shutdown, + cib__op_sync_all, + cib__op_sync_one, + cib__op_upgrade, +}; gboolean cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *updates, int *_admin_epoch, int *_epoch, int *_updates); gboolean cib_read_config(GHashTable * options, xmlNode * current_cib); +typedef int (*cib__op_fn_t)(const char *, int, const char *, xmlNode *, + xmlNode *, xmlNode *, xmlNode **, xmlNode **); + +typedef struct cib__operation_s { + const char *name; + enum cib__op_type type; + uint32_t flags; //!< Group of enum cib__op_attr flags +} cib__operation_t; + typedef struct cib_notify_client_s { const char *event; const char *obj_id; /* implement one day */ @@ -124,24 +162,66 @@ struct timer_rec_s { (flags_to_clear), #flags_to_clear); \ } while (0) -typedef int (*cib_op_t) (const char *, int, const char *, xmlNode *, - xmlNode *, xmlNode *, xmlNode **, xmlNode **); - cib_t *cib_new_variant(void); -int cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query, - const char *section, xmlNode * req, xmlNode * input, - gboolean manage_counters, gboolean * config_changed, - xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff, - xmlNode ** output); - -xmlNode *cib_create_op(int call_id, const char *op, const char *host, - const char *section, xmlNode * data, int call_options, - const char *user_name); +/*! + * \internal + * \brief Check whether a given CIB client's update should trigger a refresh + * + * Here, "refresh" means that Pacemaker daemons write out their current state. + * + * If a Pacemaker daemon or one of certain Pacemaker CLI tools modifies the CIB, + * we can assume that the CIB hasn't diverged from the true cluster state. A + * "safe" CLI tool requests that all relevant daemons update their state before + * the tool requests any CIB modifications directly. + * + * In contrast, other "unsafe" tools (for example, \c cibadmin and external + * tools) may request arbitrary CIB changes. + * + * A Pacemaker daemon can write out its current state to the CIB when it's + * notified of an update from an unsafe client, to ensure the CIB still contains + * the daemon's correct state. 
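
Editorial aside, not part of the patch: the atomic CIB transaction API introduced in cib_types.h above (init_transaction(), end_transaction(), and the cib_transaction call option) can be exercised with the long-standing libcib entry points. The sketch below is illustrative only; it assumes cib_new(), signon(), modify(), signoff(), and cib_delete() as usually provided by libcib, and the "crm_config" update it builds is a placeholder.

    #include <stdbool.h>
    #include <crm/crm.h>            // pcmk_ok
    #include <crm/cib.h>            // cib_t, cib_new(), cib_delete()
    #include <crm/common/xml.h>     // create_xml_node(), free_xml()

    /* Sketch only: queue a CIB modification in the client's transaction and
     * commit it atomically.  Error handling is abbreviated.
     */
    static int
    update_config_atomically(void)
    {
        cib_t *cib = cib_new();
        xmlNode *update = NULL;
        int rc = cib->cmds->signon(cib, "example-client", cib_command);

        if (rc != pcmk_ok) {
            cib_delete(cib);
            return rc;
        }

        rc = cib->cmds->init_transaction(cib);
        if (rc == pcmk_ok) {
            update = create_xml_node(NULL, "crm_config");
            /* ... fill in the desired changes here ... */

            // cib_transaction queues the request in the active transaction
            rc = cib->cmds->modify(cib, "crm_config", update, cib_transaction);

            // Commit (process queued requests atomically) or discard on error
            rc = cib->cmds->end_transaction(cib, (rc == pcmk_ok), cib_sync_call);
        }

        free_xml(update);
        cib->cmds->signoff(cib);
        cib_delete(cib);
        return rc;
    }
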
+ * + * \param[in] name CIB client name + * + * \return \c true if the CIB client should trigger a refresh, or \c false + * otherwise + */ +static inline bool +cib__client_triggers_refresh(const char *name) +{ + return !crm_is_daemon_name(name) + && !pcmk__str_any_of(name, + "attrd_updater", + "crm_attribute", + "crm_node", + "crm_resource", + "crm_ticket", + NULL); +} + +int cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset); + +bool cib__element_in_patchset(const xmlNode *patchset, const char *element); + +int cib_perform_op(const char *op, int call_options, cib__op_fn_t fn, + bool is_query, const char *section, xmlNode *req, + xmlNode *input, bool manage_counters, bool *config_changed, + xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff, + xmlNode **output); + +int cib__create_op(cib_t *cib, const char *op, const char *host, + const char *section, xmlNode *data, int call_options, + const char *user_name, const char *client_name, + xmlNode **op_msg); + +int cib__extend_transaction(cib_t *cib, xmlNode *request); void cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc); void cib_native_notify(gpointer data, gpointer user_data); +int cib__get_operation(const char *op, const cib__operation_t **operation); + int cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer); diff --git a/include/crm/cluster.h b/include/crm/cluster.h index bceb9c2..b61fd70 100644 --- a/include/crm/cluster.h +++ b/include/crm/cluster.h @@ -78,6 +78,9 @@ typedef struct crm_peer_node_s { time_t peer_lost; char *conn_host; + + time_t when_member; // Since when node has been a cluster member + time_t when_online; // Since when peer has been online in CPG } crm_node_t; void crm_peer_init(void); @@ -133,8 +136,8 @@ enum crm_get_peer_flags { }; gboolean send_cluster_message(const crm_node_t *node, - enum crm_ais_msg_types service, xmlNode *data, - gboolean ordered); + enum crm_ais_msg_types service, + const xmlNode *data, gboolean ordered); int crm_remote_peer_cache_size(void); @@ -174,7 +177,6 @@ char *pcmk_message_common_cs(cpg_handle_t handle, uint32_t nodeid, uint32_t pid, const char *crm_peer_uuid(crm_node_t *node); const char *crm_peer_uname(const char *uuid); -void set_uuid(xmlNode *xml, const char *attr, crm_node_t *node); enum crm_status_type { crm_status_uname, diff --git a/include/crm/cluster/Makefile.am b/include/crm/cluster/Makefile.am index 96f2bd0..2500a87 100644 --- a/include/crm/cluster/Makefile.am +++ b/include/crm/cluster/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2012-2021 the Pacemaker project contributors +# Copyright 2012-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -10,5 +10,6 @@ MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm/cluster -noinst_HEADERS = internal.h election_internal.h +noinst_HEADERS = internal.h \ + $(wildcard *_internal.h) header_HEADERS = compat.h diff --git a/include/crm/cluster/compat.h b/include/crm/cluster/compat.h index 9bf14ee..89a03fd 100644 --- a/include/crm/cluster/compat.h +++ b/include/crm/cluster/compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -10,6 +10,9 @@ #ifndef PCMK__CRM_CLUSTER_COMPAT__H # define PCMK__CRM_CLUSTER_COMPAT__H +#include // xmlNode +#include // crm_node_t + #ifdef __cplusplus extern "C" { #endif @@ -30,6 +33,9 @@ int crm_terminate_member(int nodeid, const char *uname, void *unused); int crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection); +// \deprecated Use crm_xml_add(xml, attr, crm_peer_uuid(node)) instead +void set_uuid(xmlNode *xml, const char *attr, crm_node_t *node); + #ifdef __cplusplus } #endif diff --git a/include/crm/cluster/internal.h b/include/crm/cluster/internal.h index 9bc57c6..e20ee4c 100644 --- a/include/crm/cluster/internal.h +++ b/include/crm/cluster/internal.h @@ -124,10 +124,16 @@ void pcmk__corosync_quorum_connect(gboolean (*dispatch)(unsigned long long, void (*destroy) (gpointer)); crm_node_t *pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags); -crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname); +crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname, + const char *uuid); void pcmk__refresh_node_caches_from_cib(xmlNode *cib); crm_node_t *pcmk__search_known_node_cache(unsigned int id, const char *uname, uint32_t flags); +crm_node_t *pcmk__get_peer(unsigned int id, const char *uname, + const char *uuid); +crm_node_t *pcmk__get_peer_full(unsigned int id, const char *uname, + const char *uuid, int flags); + #endif diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am index 7d417e4..83a4197 100644 --- a/include/crm/common/Makefile.am +++ b/include/crm/common/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2022 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
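
Editorial aside, not part of the patch: the deprecation comment above already names the drop-in replacement for set_uuid(), so callers can migrate with a one-line change. A minimal sketch, assuming the caller already has the xmlNode, attribute name, and crm_node_t in hand:

    #include <crm/cluster.h>            // crm_node_t, crm_peer_uuid()
    #include <crm/common/nvpair.h>      // crm_xml_add()

    // Equivalent of the deprecated set_uuid(xml, attr, node), per the
    // deprecation comment above
    static void
    add_node_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
    {
        crm_xml_add(xml, attr, crm_peer_uuid(node));
    }
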
# @@ -11,45 +11,34 @@ MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm/common -header_HEADERS = acl.h \ - agents.h \ - agents_compat.h \ - cib.h \ - ipc.h \ - ipc_attrd_internal.h \ - ipc_controld.h \ - ipc_pacemakerd.h \ - ipc_schedulerd.h \ - iso8601.h \ - logging.h \ - logging_compat.h \ - mainloop.h \ - mainloop_compat.h \ - nvpair.h \ - output.h \ - results.h \ - results_compat.h \ - util.h \ - util_compat.h \ - xml.h \ - xml_compat.h +header_HEADERS = acl.h \ + actions.h \ + agents.h \ + agents_compat.h \ + cib.h \ + ipc.h \ + ipc_controld.h \ + ipc_pacemakerd.h \ + ipc_schedulerd.h \ + iso8601.h \ + logging.h \ + logging_compat.h \ + mainloop.h \ + mainloop_compat.h \ + nodes.h \ + nvpair.h \ + output.h \ + resources.h \ + results.h \ + results_compat.h \ + roles.h \ + scheduler.h \ + scheduler_types.h \ + tags.h \ + tickets.h \ + util.h \ + util_compat.h \ + xml.h \ + xml_compat.h -noinst_HEADERS = acl_internal.h \ - alerts_internal.h \ - attrd_internal.h \ - cmdline_internal.h \ - health_internal.h \ - internal.h \ - io_internal.h \ - ipc_internal.h \ - iso8601_internal.h \ - lists_internal.h \ - logging_internal.h \ - messages_internal.h \ - options_internal.h \ - output_internal.h \ - remote_internal.h \ - results_internal.h \ - strings_internal.h \ - unittest_internal.h \ - xml_internal.h +noinst_HEADERS = $(wildcard *internal.h) diff --git a/include/crm/common/action_relation_internal.h b/include/crm/common/action_relation_internal.h new file mode 100644 index 0000000..e789131 --- /dev/null +++ b/include/crm/common/action_relation_internal.h @@ -0,0 +1,132 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H +# define PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H + +/*! + * Flags to indicate the relationship between two actions + * + * @COMPAT The values and semantics of these flags should not be changed until + * the deprecated enum pe_ordering is dropped from the public API. + */ +enum pcmk__action_relation_flags { + //! No relation (compare with equality rather than bit set) + pcmk__ar_none = 0U, + + //! Actions are ordered (optionally, if no other flags are set) + pcmk__ar_ordered = (1U << 0), + + //! Relation applies only if 'first' cannot be part of a live migration + pcmk__ar_if_first_unmigratable = (1U << 1), + + /*! + * If 'then' is required, 'first' becomes required (and becomes unmigratable + * if 'then' is); also, if 'first' is a stop of a blocked resource, 'then' + * becomes unrunnable + */ + pcmk__ar_then_implies_first = (1U << 4), + + /*! + * If 'first' is required, 'then' becomes required; if 'first' is a stop of + * a blocked resource, 'then' becomes unrunnable + */ + pcmk__ar_first_implies_then = (1U << 5), + + /*! + * If 'then' is required and for a promoted instance, 'first' becomes + * required (and becomes unmigratable if 'then' is) + */ + pcmk__ar_promoted_then_implies_first = (1U << 6), + + /*! + * 'first' is runnable only if 'then' is both runnable and migratable, + * and 'first' becomes required if 'then' is + */ + pcmk__ar_unmigratable_then_blocks = (1U << 7), + + //! 'then' is runnable (and migratable) only if 'first' is runnable + pcmk__ar_unrunnable_first_blocks = (1U << 8), + + //! 
If 'first' is unrunnable, 'then' becomes a real, unmigratable action + pcmk__ar_first_else_then = (1U << 9), + + //! If 'first' is required, 'then' action for instance on same node is + pcmk__ar_first_implies_same_node_then = (1U << 10), + + /*! + * Disable relation if 'first' is unrunnable and for an active resource, + * otherwise order actions and make 'then' unrunnable if 'first' is. + * + * This is used to order a bundle replica's start of its container before a + * probe of its remote connection resource, in case the connection uses the + * REMOTE_CONTAINER_HACK to replace the connection address with where the + * container is running. + */ + pcmk__ar_nested_remote_probe = (1U << 11), + + /*! + * If 'first' is for a blocked resource, make 'then' unrunnable. + * + * If 'then' is required, make 'first' required, make 'first' unmigratable + * if 'then' is unmigratable, and make 'then' unrunnable if 'first' is + * unrunnable. + * + * If 'then' is unrunnable and for the same resource as 'first', make + * 'first' required if it is runnable, and make 'first' unmigratable if + * 'then' is unmigratable. + * + * This is used for "stop then start primitive" (restarts) and + * "stop group member then stop previous member". + */ + pcmk__ar_intermediate_stop = (1U << 12), + + /*! + * The actions must be serialized if in the same transition but can be in + * either order. (In practice, we always arrange them as 'first' then + * 'then', so they end up being essentially the same as optional orderings.) + * + * @TODO Handle more intelligently -- for example, we could schedule the + * action with the fewest inputs first, so we're more likely to execute at + * least one if there is a failure during the transition. Or, we could + * prefer certain action types over others, or base it on resource priority. + */ + pcmk__ar_serialize = (1U << 14), + + //! Relation applies only if actions are on same node + pcmk__ar_if_on_same_node = (1U << 15), + + //! If 'then' is required, 'first' must be added to the transition graph + pcmk__ar_then_implies_first_graphed = (1U << 16), + + //! If 'first' is required and runnable, 'then' must be in graph + pcmk__ar_first_implies_then_graphed = (1U << 17), + + //! User-configured asymmetric ordering + pcmk__ar_asymmetric = (1U << 20), + + //! Actions are ordered if on same node (or migration target for migrate_to) + pcmk__ar_if_on_same_node_or_target = (1U << 21), + + //! 'then' action is runnable if certain number of 'first' instances are + pcmk__ar_min_runnable = (1U << 22), + + //! Ordering applies only if 'first' is required and on same node as 'then' + pcmk__ar_if_required_on_same_node = (1U << 23), + + //! Ordering applies even if 'first' runs on guest node created by 'then' + pcmk__ar_guest_allowed = (1U << 24), + + //! If 'then' action becomes required, 'first' becomes optional + pcmk__ar_then_cancels_first = (1U << 25), +}; + +typedef struct pe_action_wrapper_s pcmk__related_action_t; + +#endif // PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H diff --git a/include/crm/common/actions.h b/include/crm/common/actions.h new file mode 100644 index 0000000..5d2784d --- /dev/null +++ b/include/crm/common/actions.h @@ -0,0 +1,467 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
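
Editorial aside on the pcmk__action_relation_flags enum introduced above (action_relation_internal.h), not part of the patch: the values are plain bit flags, combined with | and tested with the public flag helpers. The sketch below is illustrative only; the function name is invented, and the flag choice simply mirrors the "stop then start primitive" description above.

    #include <stdbool.h>
    #include <stdint.h>

    #include <crm/common/util.h>                        // pcmk_is_set()
    #include <crm/common/action_relation_internal.h>    // internal header added above

    // Sketch only: test whether an ordering was built as a restart-style
    // relation (pcmk__ar_intermediate_stop is what "stop then start" uses)
    static bool
    order_is_restart_style(uint32_t relation_flags)
    {
        return pcmk_is_set(relation_flags, pcmk__ar_intermediate_stop);
    }
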
+ */ + +#ifndef PCMK__CRM_COMMON_ACTIONS__H +#define PCMK__CRM_COMMON_ACTIONS__H + +#include // bool +#include // strcasecmp() +#include // gboolean, guint +#include // xmlNode + +#include // lrmd_event_data_t + +#include // GList, GHashTable +#include // xmlNode + +#include +#include // enum rsc_start_requirement, etc. +#include // pcmk_resource_t, pcmk_node_t + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief APIs related to actions + * \ingroup core + */ + +//! Default timeout (in milliseconds) for non-metadata actions +#define PCMK_DEFAULT_ACTION_TIMEOUT_MS 20000 + +// @COMPAT We don't need a separate timeout for metadata, much less a longer one +//! \deprecated Default timeout (in milliseconds) for metadata actions +#define PCMK_DEFAULT_METADATA_TIMEOUT_MS 30000 + +// Action names as strings +#define PCMK_ACTION_CANCEL "cancel" +#define PCMK_ACTION_CLEAR_FAILCOUNT "clear_failcount" +#define PCMK_ACTION_CLONE_ONE_OR_MORE "clone-one-or-more" +#define PCMK_ACTION_DELETE "delete" +#define PCMK_ACTION_DEMOTE "demote" +#define PCMK_ACTION_DEMOTED "demoted" +#define PCMK_ACTION_DO_SHUTDOWN "do_shutdown" +#define PCMK_ACTION_LIST "list" +#define PCMK_ACTION_LRM_DELETE "lrm_delete" +#define PCMK_ACTION_LOAD_STOPPED "load_stopped" +#define PCMK_ACTION_MAINTENANCE_NODES "maintenance_nodes" +#define PCMK_ACTION_META_DATA "meta-data" +#define PCMK_ACTION_MIGRATE_FROM "migrate_from" +#define PCMK_ACTION_MIGRATE_TO "migrate_to" +#define PCMK_ACTION_MONITOR "monitor" +#define PCMK_ACTION_NOTIFIED "notified" +#define PCMK_ACTION_NOTIFY "notify" +#define PCMK_ACTION_OFF "off" +#define PCMK_ACTION_ON "on" +#define PCMK_ACTION_ONE_OR_MORE "one-or-more" +#define PCMK_ACTION_PROMOTE "promote" +#define PCMK_ACTION_PROMOTED "promoted" +#define PCMK_ACTION_REBOOT "reboot" +#define PCMK_ACTION_RELOAD "reload" +#define PCMK_ACTION_RELOAD_AGENT "reload-agent" +#define PCMK_ACTION_RUNNING "running" +#define PCMK_ACTION_START "start" +#define PCMK_ACTION_STATUS "status" +#define PCMK_ACTION_STONITH "stonith" +#define PCMK_ACTION_STOP "stop" +#define PCMK_ACTION_STOPPED "stopped" +#define PCMK_ACTION_VALIDATE_ALL "validate-all" + +//! Possible actions (including some pseudo-actions) +enum action_tasks { + pcmk_action_unspecified = 0, //!< Unspecified or unknown action + pcmk_action_monitor, //!< Monitor + + // Each "completed" action must be the regular action plus 1 + + pcmk_action_stop, //!< Stop + pcmk_action_stopped, //!< Stop completed + + pcmk_action_start, //!< Start + pcmk_action_started, //!< Start completed + + pcmk_action_notify, //!< Notify + pcmk_action_notified, //!< Notify completed + + pcmk_action_promote, //!< Promote + pcmk_action_promoted, //!< Promoted + + pcmk_action_demote, //!< Demote + pcmk_action_demoted, //!< Demoted + + pcmk_action_shutdown, //!< Shut down node + pcmk_action_fence, //!< Fence node + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_action_unspecified instead + no_action = pcmk_action_unspecified, + + //! \deprecated Use pcmk_action_monitor instead + monitor_rsc = pcmk_action_monitor, + + //! \deprecated Use pcmk_action_stop instead + stop_rsc = pcmk_action_stop, + + //! \deprecated Use pcmk_action_stopped instead + stopped_rsc = pcmk_action_stopped, + + //! \deprecated Use pcmk_action_start instead + start_rsc = pcmk_action_start, + + //! \deprecated Use pcmk_action_started instead + started_rsc = pcmk_action_started, + + //! 
\deprecated Use pcmk_action_notify instead + action_notify = pcmk_action_notify, + + //! \deprecated Use pcmk_action_notified instead + action_notified = pcmk_action_notified, + + //! \deprecated Use pcmk_action_promote instead + action_promote = pcmk_action_promote, + + //! \deprecated Use pcmk_action_promoted instead + action_promoted = pcmk_action_promoted, + + //! \deprecated Use pcmk_action_demote instead + action_demote = pcmk_action_demote, + + //! \deprecated Use pcmk_action_demoted instead + action_demoted = pcmk_action_demoted, + + //! \deprecated Use pcmk_action_shutdown instead + shutdown_crm = pcmk_action_shutdown, + + //! \deprecated Use pcmk_action_fence instead + stonith_node = pcmk_action_fence, +#endif +}; + +//! Possible responses to a resource action failure +enum action_fail_response { + /* The order is (partially) significant here; the values from + * pcmk_on_fail_ignore through pcmk_on_fail_fence_node are in order of + * increasing severity. + * + * @COMPAT The values should be ordered and numbered per the "TODO" comments + * below, so all values are in order of severity and there is room for + * future additions, but that would break API compatibility. + * @TODO For now, we just use a function to compare the values specially, but + * at the next compatibility break, we should arrange things + * properly so we can compare with less than and greater than. + */ + + // @TODO Define as 10 + pcmk_on_fail_ignore = 0, //!< Act as if failure didn't happen + + // @TODO Define as 30 + pcmk_on_fail_restart = 1, //!< Restart resource + + // @TODO Define as 60 + pcmk_on_fail_ban = 2, //!< Ban resource from current node + + // @TODO Define as 70 + pcmk_on_fail_block = 3, //!< Treat resource as unmanaged + + // @TODO Define as 80 + pcmk_on_fail_stop = 4, //!< Stop resource and leave stopped + + // @TODO Define as 90 + pcmk_on_fail_standby_node = 5, //!< Put resource's node in standby + + // @TODO Define as 100 + pcmk_on_fail_fence_node = 6, //!< Fence resource's node + + // @COMPAT Values below here are out of desired order for API compatibility + + // @TODO Define as 50 + pcmk_on_fail_restart_container = 7, //!< Restart resource's container + + // @TODO Define as 40 + /*! + * Fence the remote node created by the resource if fencing is enabled, + * otherwise attempt to restart the resource (used internally for some + * remote connection failures). + */ + pcmk_on_fail_reset_remote = 8, + + // @TODO Define as 20 + pcmk_on_fail_demote = 9, //!< Demote if promotable, else stop + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_on_fail_ignore instead + action_fail_ignore = pcmk_on_fail_ignore, + + //! \deprecated Use pcmk_on_fail_restart instead + action_fail_recover = pcmk_on_fail_restart, + + //! \deprecated Use pcmk_on_fail_ban instead + action_fail_migrate = pcmk_on_fail_ban, + + //! \deprecated Use pcmk_on_fail_block instead + action_fail_block = pcmk_on_fail_block, + + //! \deprecated Use pcmk_on_fail_stop instead + action_fail_stop = pcmk_on_fail_stop, + + //! \deprecated Use pcmk_on_fail_standby_node instead + action_fail_standby = pcmk_on_fail_standby_node, + + //! \deprecated Use pcmk_on_fail_fence_node instead + action_fail_fence = pcmk_on_fail_fence_node, + + //! \deprecated Use pcmk_on_fail_restart_container instead + action_fail_restart_container = pcmk_on_fail_restart_container, + + //! \deprecated Use pcmk_on_fail_reset_remote instead + action_fail_reset_remote = pcmk_on_fail_reset_remote, + + //! 
\deprecated Use pcmk_on_fail_demote instead + action_fail_demote = pcmk_on_fail_demote, +#endif +}; + +//! Action scheduling flags +enum pe_action_flags { + //! No action flags set (compare with equality rather than bit set) + pcmk_no_action_flags = 0, + + //! Whether action does not require invoking an agent + pcmk_action_pseudo = (1 << 0), + + //! Whether action is runnable + pcmk_action_runnable = (1 << 1), + + //! Whether action should not be executed + pcmk_action_optional = (1 << 2), + + //! Whether action should be added to transition graph even if optional + pcmk_action_always_in_graph = (1 << 3), + + //! Whether operation-specific instance attributes have been unpacked yet + pcmk_action_attrs_evaluated = (1 << 4), + + //! Whether action is allowed to be part of a live migration + pcmk_action_migratable = (1 << 7), + + //! Whether action has been added to transition graph + pcmk_action_added_to_graph = (1 << 8), + + //! Whether action is a stop to abort a dangling migration + pcmk_action_migration_abort = (1 << 11), + + /*! + * Whether action is an ordering point for minimum required instances + * (used to implement ordering after clones with clone-min configured, + * and ordered sets with require-all=false) + */ + pcmk_action_min_runnable = (1 << 12), + + //! Whether action is recurring monitor that must be rescheduled if active + pcmk_action_reschedule = (1 << 13), + + //! Whether action has already been processed by a recursive procedure + pcmk_action_detect_loop = (1 << 14), + + //! Whether action's inputs have been de-duplicated yet + pcmk_action_inputs_deduplicated = (1 << 15), + + //! Whether action can be executed on DC rather than own node + pcmk_action_on_dc = (1 << 16), + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_action_pseudo instead + pe_action_pseudo = pcmk_action_pseudo, + + //! \deprecated Use pcmk_action_runnable instead + pe_action_runnable = pcmk_action_runnable, + + //! \deprecated Use pcmk_action_optional instead + pe_action_optional = pcmk_action_optional, + + //! \deprecated Use pcmk_action_always_in_graph instead + pe_action_print_always = pcmk_action_always_in_graph, + + //! \deprecated Use pcmk_action_attrs_evaluated instead + pe_action_have_node_attrs = pcmk_action_attrs_evaluated, + + //! \deprecated Do not use + pe_action_implied_by_stonith = (1 << 6), + + //! \deprecated Use pcmk_action_migratable instead + pe_action_migrate_runnable = pcmk_action_migratable, + + //! \deprecated Use pcmk_action_added_to_graph instead + pe_action_dumped = pcmk_action_added_to_graph, + + //! \deprecated Do not use + pe_action_processed = (1 << 9), + + //! \deprecated Do not use + pe_action_clear = (1 << 10), + + //! \deprecated Use pcmk_action_migration_abort instead + pe_action_dangle = pcmk_action_migration_abort, + + //! \deprecated Use pcmk_action_min_runnable instead + pe_action_requires_any = pcmk_action_min_runnable, + + //! \deprecated Use pcmk_action_reschedule instead + pe_action_reschedule = pcmk_action_reschedule, + + //! \deprecated Use pcmk_action_detect_loop instead + pe_action_tracking = pcmk_action_detect_loop, + + //! \deprecated Use pcmk_action_inputs_deduplicated instead + pe_action_dedup = pcmk_action_inputs_deduplicated, + + //! 
\deprecated Use pcmk_action_on_dc instead + pe_action_dc = pcmk_action_on_dc, +#endif +}; + +/* @COMPAT enum pe_link_state and enum pe_ordering are currently needed for + * struct pe_action_wrapper_s (which is public) but should be removed at an + * API compatibility break when that can be refactored and made internal + */ + +//!@{ +//! \deprecated Do not use +enum pe_link_state { + pe_link_not_dumped = 0, + pe_link_dumped = 1, +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + pe_link_dup = 2, +#endif +}; + +enum pe_ordering { + pe_order_none = 0x0, +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + pe_order_optional = 0x1, + pe_order_apply_first_non_migratable = 0x2, + pe_order_implies_first = 0x10, + pe_order_implies_then = 0x20, + pe_order_promoted_implies_first = 0x40, + pe_order_implies_first_migratable = 0x80, + pe_order_runnable_left = 0x100, + pe_order_pseudo_left = 0x200, + pe_order_implies_then_on_node = 0x400, + pe_order_probe = 0x800, + pe_order_restart = 0x1000, + pe_order_stonith_stop = 0x2000, + pe_order_serialize_only = 0x4000, + pe_order_same_node = 0x8000, + pe_order_implies_first_printed = 0x10000, + pe_order_implies_then_printed = 0x20000, + pe_order_asymmetrical = 0x100000, + pe_order_load = 0x200000, + pe_order_one_or_more = 0x400000, + pe_order_anti_colocation = 0x800000, + pe_order_preserve = 0x1000000, + pe_order_then_cancels_first = 0x2000000, + pe_order_trace = 0x4000000, + pe_order_implies_first_master = pe_order_promoted_implies_first, +#endif +}; + +// Action sequenced relative to another action +// @COMPAT This should be internal +struct pe_action_wrapper_s { + // @COMPAT This should be uint32_t + enum pe_ordering type; // Group of enum pcmk__action_relation_flags + + // @COMPAT This should be a bool + enum pe_link_state state; // Whether action has been added to graph yet + + pcmk_action_t *action; // Action to be sequenced +}; +//!@} + +//! Implementation of pcmk_action_t +struct pe_action_s { + int id; //!< Counter to identify action + + /*! + * When the controller aborts a transition graph, it sets an abort priority. + * If this priority is higher, the action will still be executed anyway. + * Pseudo-actions are always allowed, so this is irrelevant for them. + */ + int priority; + + pcmk_resource_t *rsc; //!< Resource to apply action to, if any + pcmk_node_t *node; //!< Node to execute action on, if any + xmlNode *op_entry; //!< Action XML configuration, if any + char *task; //!< Action name + char *uuid; //!< Action key + char *cancel_task; //!< If task is "cancel", the action being cancelled + char *reason; //!< Readable description of why action is needed + + //@ COMPAT Change to uint32_t at a compatibility break + enum pe_action_flags flags; //!< Group of enum pe_action_flags + + enum rsc_start_requirement needs; //!< Prerequisite for recovery + enum action_fail_response on_fail; //!< Response to failure + enum rsc_role_e fail_role; //!< Resource role if action fails + GHashTable *meta; //!< Meta-attributes relevant to action + GHashTable *extra; //!< Action-specific instance attributes + + /* Current count of runnable instance actions for "first" action in an + * ordering dependency with pcmk__ar_min_runnable set. + */ + int runnable_before; //!< For Pacemaker use only + + /*! + * Number of instance actions for "first" action in an ordering dependency + * with pcmk__ar_min_runnable set that must be runnable before this action + * can be runnable. 
+ */ + int required_runnable_before; + + // Actions in a relation with this one (as pcmk__related_action_t *) + GList *actions_before; //!< For Pacemaker use only + GList *actions_after; //!< For Pacemaker use only + + /* This is intended to hold data that varies by the type of action, but is + * not currently used. Some of the above fields could be moved here except + * for API backward compatibility. + */ + void *action_details; //!< For Pacemaker use only +}; + +// For parsing various action-related string specifications +gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, + guint *interval_ms); +gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, + int *action_id, int *target_rc); +gboolean decode_transition_magic(const char *magic, char **uuid, + int *transition_id, int *action_id, + int *op_status, int *op_rc, int *target_rc); + +// @COMPAT Either these shouldn't be in libcrmcommon or lrmd_event_data_t should +int rsc_op_expected_rc(const lrmd_event_data_t *event); +gboolean did_rsc_op_fail(lrmd_event_data_t *event, int target_rc); + +bool crm_op_needs_metadata(const char *rsc_class, const char *op); + +xmlNode *crm_create_op_xml(xmlNode *parent, const char *prefix, + const char *task, const char *interval_spec, + const char *timeout); + +bool pcmk_is_probe(const char *task, guint interval); +bool pcmk_xe_is_probe(const xmlNode *xml_op); +bool pcmk_xe_mask_probe_failure(const xmlNode *xml_op); + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_ACTIONS__H diff --git a/include/crm/common/actions_internal.h b/include/crm/common/actions_internal.h new file mode 100644 index 0000000..7e794e6 --- /dev/null +++ b/include/crm/common/actions_internal.h @@ -0,0 +1,57 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_ACTIONS_INTERNAL__H +#define PCMK__CRM_COMMON_ACTIONS_INTERNAL__H + +#include // bool +#include // guint +#include // xmlNode + +#include // PCMK_ACTION_MONITOR +#include // pcmk__str_eq() + +#ifdef __cplusplus +extern "C" { +#endif + +//! printf-style format to create operation key from resource, action, interval +#define PCMK__OP_FMT "%s_%s_%u" + +char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms); +char *pcmk__notify_key(const char *rsc_id, const char *notify_type, + const char *op_type); +char *pcmk__transition_key(int transition_id, int action_id, int target_rc, + const char *node); +void pcmk__filter_op_for_digest(xmlNode *param_set); +bool pcmk__is_fencing_action(const char *action); + +/*! 
+ * \internal + * \brief Get a human-friendly action name + * + * \param[in] action_name Actual action name + * \param[in] interval_ms Action interval (in milliseconds) + * + * \return Action name suitable for display + */ +static inline const char * +pcmk__readable_action(const char *action_name, guint interval_ms) { + if ((interval_ms == 0) + && pcmk__str_eq(action_name, PCMK_ACTION_MONITOR, pcmk__str_none)) { + return "probe"; + } + return action_name; +} + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_ACTIONS_INTERNAL__H diff --git a/include/crm/common/alerts_internal.h b/include/crm/common/alerts_internal.h index ef64fab..dc67427 100644 --- a/include/crm/common/alerts_internal.h +++ b/include/crm/common/alerts_internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2015-2022 the Pacemaker project contributors + * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -76,7 +76,6 @@ void pcmk__add_alert_key(GHashTable *table, enum pcmk__alert_keys_e name, const char *value); void pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name, int value); -bool pcmk__alert_in_patchset(xmlNode *msg, bool config); static inline const char * pcmk__alert_flag2text(enum pcmk__alert_flags flag) diff --git a/include/crm/common/cib_internal.h b/include/crm/common/cib_internal.h new file mode 100644 index 0000000..c41c12e --- /dev/null +++ b/include/crm/common/cib_internal.h @@ -0,0 +1,23 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_CIB_INTERNAL__H +#define PCMK__CRM_COMMON_CIB_INTERNAL__H + +#ifdef __cplusplus +extern "C" { +#endif + +const char *pcmk__cib_abs_xpath_for(const char *element); + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__COMMON_CIB_INTERNAL__H diff --git a/include/crm/common/clone_internal.h b/include/crm/common/clone_internal.h new file mode 100644 index 0000000..494ee74 --- /dev/null +++ b/include/crm/common/clone_internal.h @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_CLONE_INTERNAL__H +# define PCMK__CRM_COMMON_CLONE_INTERNAL__H + +#ifdef __cplusplus +extern "C" { +#endif + +// Clone resource flags (used in variant data) +enum pcmk__clone_flags { + // Whether instances should be started sequentially + pcmk__clone_ordered = (1 << 0), + + // Whether promotion scores have been added + pcmk__clone_promotion_added = (1 << 1), + + // Whether promotion constraints have been added + pcmk__clone_promotion_constrained = (1 << 2), +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_CLONE_INTERNAL__H diff --git a/include/crm/common/digests_internal.h b/include/crm/common/digests_internal.h new file mode 100644 index 0000000..7598de2 --- /dev/null +++ b/include/crm/common/digests_internal.h @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. 
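
Editorial aside, not part of the patch: the operation-key helpers above (PCMK__OP_FMT, pcmk__op_key(), parse_op_key(), pcmk__readable_action()) round-trip cleanly. A minimal sketch; the resource ID "vip" is made up, and the internal header include only resolves from within the Pacemaker source tree.

    #include <stdio.h>
    #include <stdlib.h>             // free()
    #include <glib.h>               // guint, gboolean

    #include <crm/common/actions.h>           // PCMK_ACTION_MONITOR, parse_op_key()
    #include <crm/common/actions_internal.h>  // pcmk__op_key(), pcmk__readable_action()

    int
    main(void)
    {
        // PCMK__OP_FMT ("%s_%s_%u") yields "vip_monitor_10000"
        char *key = pcmk__op_key("vip", PCMK_ACTION_MONITOR, 10000);

        char *rsc_id = NULL;
        char *op_type = NULL;
        guint interval_ms = 0;

        if (parse_op_key(key, &rsc_id, &op_type, &interval_ms)) {
            // Note: a zero-interval monitor would be displayed as a "probe"
            printf("%s %s every %ums (shown as \"%s\")\n",
                   rsc_id, op_type, interval_ms,
                   pcmk__readable_action(op_type, interval_ms));
        }

        free(rsc_id);
        free(op_type);
        free(key);
        return 0;
    }
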
+ * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_DIGESTS_INTERNAL__H +# define PCMK__CRM_COMMON_DIGESTS_INTERNAL__H + +#include // xmlNode + +#ifdef __cplusplus +extern "C" { +#endif + +// Digest comparison results +enum pcmk__digest_result { + pcmk__digest_unknown, // No digest available for comparison + pcmk__digest_match, // Digests match + pcmk__digest_mismatch, // Any parameter changed (potentially reloadable) + pcmk__digest_restart, // Parameters that require a restart changed +}; + +bool pcmk__verify_digest(xmlNode *input, const char *expected); + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_DIGESTS_INTERNAL__H diff --git a/include/crm/common/failcounts_internal.h b/include/crm/common/failcounts_internal.h new file mode 100644 index 0000000..4ad01bf --- /dev/null +++ b/include/crm/common/failcounts_internal.h @@ -0,0 +1,41 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H +# define PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H + +#ifdef __cplusplus +extern "C" { +#endif + +// Options when getting resource fail counts +enum pcmk__fc_flags { + pcmk__fc_default = (1 << 0), + pcmk__fc_effective = (1 << 1), // Don't count expired failures + pcmk__fc_fillers = (1 << 2), // If container, include filler failures +}; + +/*! + * \internal + * \enum pcmk__rsc_node + * \brief Type of resource location lookup to perform + */ +enum pcmk__rsc_node { + pcmk__rsc_node_assigned = 0, //!< Where resource is assigned + pcmk__rsc_node_current = 1, //!< Where resource is running + + // @COMPAT: Use in native_location() at a compatibility break + pcmk__rsc_node_pending = 2, //!< Where resource is pending +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H diff --git a/include/crm/common/group_internal.h b/include/crm/common/group_internal.h new file mode 100644 index 0000000..9e1424d --- /dev/null +++ b/include/crm/common/group_internal.h @@ -0,0 +1,27 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_GROUP_INTERNAL__H +# define PCMK__CRM_COMMON_GROUP_INTERNAL__H + +#ifdef __cplusplus +extern "C" { +#endif + +// Group resource flags (used in variant data) +enum pcmk__group_flags { + pcmk__group_ordered = (1 << 0), // Members start sequentially + pcmk__group_colocated = (1 << 1), // Members must be on same node +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_GROUP_INTERNAL__H diff --git a/include/crm/common/health_internal.h b/include/crm/common/health_internal.h index 277a4c9..f98529c 100644 --- a/include/crm/common/health_internal.h +++ b/include/crm/common/health_internal.h @@ -18,7 +18,7 @@ extern "C" { * \internal * \brief Possible node health strategies * - * \note It would be nice to use this in pe_working_set_t but that will have to + * \note It would be nice to use this in pcmk_scheduler_t but that will have to * wait for an API backward compatibility break. 
*/ enum pcmk__health_strategy { diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h index bd98780..3078606 100644 --- a/include/crm/common/internal.h +++ b/include/crm/common/internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2015-2022 the Pacemaker project contributors + * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -20,6 +20,8 @@ #include // crm_strdup_printf() #include // do_crm_log_unlikely(), etc. #include // mainloop_io_t, struct ipc_client_callbacks +#include +#include #include #include #include @@ -50,11 +52,6 @@ int pcmk__substitute_secrets(const char *rsc_id, GHashTable *params); #endif -/* internal digest-related utilities (from digest.c) */ - -bool pcmk__verify_digest(xmlNode *input, const char *expected); - - /* internal main loop utilities (from mainloop.c) */ int pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, @@ -164,20 +161,6 @@ int pcmk__pidfile_matches(const char *filename, pid_t expected_pid, int pcmk__lock_pidfile(const char *filename, const char *name); -/* internal functions related to resource operations (from operations.c) */ - -// printf-style format to create operation ID from resource, action, interval -#define PCMK__OP_FMT "%s_%s_%u" - -char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms); -char *pcmk__notify_key(const char *rsc_id, const char *notify_type, - const char *op_type); -char *pcmk__transition_key(int transition_id, int action_id, int target_rc, - const char *node); -void pcmk__filter_op_for_digest(xmlNode *param_set); -bool pcmk__is_fencing_action(const char *action); - - // bitwise arithmetic utilities /*! diff --git a/include/crm/common/ipc.h b/include/crm/common/ipc.h index 3d4ee10..397c8b1 100644 --- a/include/crm/common/ipc.h +++ b/include/crm/common/ipc.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -170,8 +170,8 @@ void crm_ipc_close(crm_ipc_t * client); void crm_ipc_destroy(crm_ipc_t * client); void pcmk_free_ipc_event(struct iovec *event); -int crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, - int32_t ms_timeout, xmlNode ** reply); +int crm_ipc_send(crm_ipc_t *client, const xmlNode *message, + enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply); int crm_ipc_get_fd(crm_ipc_t * client); bool crm_ipc_connected(crm_ipc_t * client); diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h index 5099dda..b391e83 100644 --- a/include/crm/common/ipc_internal.h +++ b/include/crm/common/ipc_internal.h @@ -96,6 +96,10 @@ extern "C" { int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, gid_t refgid, pid_t *gotpid); +int pcmk__connect_generic_ipc(crm_ipc_t *ipc); +int pcmk__ipc_fd(crm_ipc_t *ipc, int *fd); +int pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type, + int attempts); /* * Server-related @@ -112,6 +116,7 @@ struct pcmk__remote_s { int tcp_socket; mainloop_io_t *source; time_t uptime; + char *start_state; /* CIB-only */ char *token; @@ -245,11 +250,11 @@ int pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, #define pcmk__ipc_send_ack(c, req, flags, tag, ver, st) \ pcmk__ipc_send_ack_as(__func__, __LINE__, (c), (req), (flags), (tag), (ver), (st)) -int pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, +int pcmk__ipc_prepare_iov(uint32_t request, const xmlNode *message, uint32_t max_send_size, struct iovec **result, ssize_t *bytes); -int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message, - uint32_t flags); +int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, + const xmlNode *message, uint32_t flags); int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags); xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id, uint32_t *flags); diff --git a/include/crm/common/logging.h b/include/crm/common/logging.h index 2878fba..eea4cec 100644 --- a/include/crm/common/logging.h +++ b/include/crm/common/logging.h @@ -11,6 +11,7 @@ # define PCMK__CRM_COMMON_LOGGING__H # include +# include // uint8_t, uint32_t # include # include # include @@ -120,7 +121,9 @@ unsigned int set_crm_log_level(unsigned int level); unsigned int get_crm_log_level(void); -void pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml); +void pcmk_log_xml_as(const char *file, const char *function, uint32_t line, + uint32_t tags, uint8_t level, const char *text, + const xmlNode *xml); /* * Throughout the macros below, note the leading, pre-comma, space in the @@ -270,7 +273,8 @@ pcmk__clip_log_level(int level) __LINE__, 0); \ } \ if (crm_is_callsite_active(xml_cs, _level, 0)) { \ - pcmk_log_xml_impl(_level, text, xml); \ + pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, \ + _level, text, (xml)); \ } \ break; \ } \ diff --git a/include/crm/common/logging_compat.h b/include/crm/common/logging_compat.h index cfdb562..b57a802 100644 --- a/include/crm/common/logging_compat.h +++ b/include/crm/common/logging_compat.h @@ -10,6 +10,7 @@ #ifndef PCMK__CRM_COMMON_LOGGING_COMPAT__H # define PCMK__CRM_COMMON_LOGGING_COMPAT__H +#include // uint8_t #include #include @@ -78,6 +79,9 @@ void log_data_element(int log_level, const char *file, const char *function, int line, const char *prefix, const xmlNode *data, int depth, int legacy_options); +//! 
\deprecated Do not use Pacemaker for general-purpose logging +void pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml); + #ifdef __cplusplus } #endif diff --git a/include/crm/common/logging_internal.h b/include/crm/common/logging_internal.h index 479dcab..981ddf3 100644 --- a/include/crm/common/logging_internal.h +++ b/include/crm/common/logging_internal.h @@ -19,6 +19,18 @@ extern "C" { # include # include +typedef void (*pcmk__config_error_func) (void *ctx, const char *msg, ...); +typedef void (*pcmk__config_warning_func) (void *ctx, const char *msg, ...); + +extern pcmk__config_error_func pcmk__config_error_handler; +extern pcmk__config_warning_func pcmk__config_warning_handler; + +extern void *pcmk__config_error_context; +extern void *pcmk__config_warning_context; + +void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context); +void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context); + /*! * \internal * \brief Log a configuration error @@ -26,9 +38,13 @@ extern "C" { * \param[in] fmt printf(3)-style format string * \param[in] ... Arguments for format string */ -# define pcmk__config_err(fmt...) do { \ - crm_config_error = TRUE; \ - crm_err(fmt); \ +# define pcmk__config_err(fmt...) do { \ + crm_config_error = TRUE; \ + if (pcmk__config_error_handler == NULL) { \ + crm_err(fmt); \ + } else { \ + pcmk__config_error_handler(pcmk__config_error_context, fmt); \ + } \ } while (0) /*! @@ -38,9 +54,13 @@ extern "C" { * \param[in] fmt printf(3)-style format string * \param[in] ... Arguments for format string */ -# define pcmk__config_warn(fmt...) do { \ - crm_config_warning = TRUE; \ - crm_warn(fmt); \ +# define pcmk__config_warn(fmt...) do { \ + crm_config_warning = TRUE; \ + if (pcmk__config_warning_handler == NULL) { \ + crm_warn(fmt); \ + } else { \ + pcmk__config_warning_handler(pcmk__config_warning_context, fmt); \ + } \ } while (0) /*! @@ -72,6 +92,76 @@ extern "C" { } \ } while (0) +/*! + * \internal + * \brief Log XML changes line-by-line in a formatted fashion + * + * \param[in] level Priority at which to log the messages + * \param[in] xml XML to log + * + * \note This does nothing when \p level is \c LOG_STDOUT. + */ +#define pcmk__log_xml_changes(level, xml) do { \ + uint8_t _level = pcmk__clip_log_level(level); \ + static struct qb_log_callsite *xml_cs = NULL; \ + \ + switch (_level) { \ + case LOG_STDOUT: \ + case LOG_NEVER: \ + break; \ + default: \ + if (xml_cs == NULL) { \ + xml_cs = qb_log_callsite_get(__func__, __FILE__, \ + "xml-changes", _level, \ + __LINE__, 0); \ + } \ + if (crm_is_callsite_active(xml_cs, _level, 0)) { \ + pcmk__log_xml_changes_as(__FILE__, __func__, __LINE__, \ + 0, _level, xml); \ + } \ + break; \ + } \ + } while(0) + +/*! + * \internal + * \brief Log an XML patchset line-by-line in a formatted fashion + * + * \param[in] level Priority at which to log the messages + * \param[in] patchset XML patchset to log + * + * \note This does nothing when \p level is \c LOG_STDOUT. 
+ */ +#define pcmk__log_xml_patchset(level, patchset) do { \ + uint8_t _level = pcmk__clip_log_level(level); \ + static struct qb_log_callsite *xml_cs = NULL; \ + \ + switch (_level) { \ + case LOG_STDOUT: \ + case LOG_NEVER: \ + break; \ + default: \ + if (xml_cs == NULL) { \ + xml_cs = qb_log_callsite_get(__func__, __FILE__, \ + "xml-patchset", _level, \ + __LINE__, 0); \ + } \ + if (crm_is_callsite_active(xml_cs, _level, 0)) { \ + pcmk__log_xml_patchset_as(__FILE__, __func__, __LINE__, \ + 0, _level, patchset); \ + } \ + break; \ + } \ + } while(0) + +void pcmk__log_xml_changes_as(const char *file, const char *function, + uint32_t line, uint32_t tags, uint8_t level, + const xmlNode *xml); + +void pcmk__log_xml_patchset_as(const char *file, const char *function, + uint32_t line, uint32_t tags, uint8_t level, + const xmlNode *patchset); + /*! * \internal * \brief Initialize logging for command line tools diff --git a/include/crm/common/nodes.h b/include/crm/common/nodes.h new file mode 100644 index 0000000..fbc3758 --- /dev/null +++ b/include/crm/common/nodes.h @@ -0,0 +1,144 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_NODES__H +# define PCMK__CRM_COMMON_NODES__H + +#include // gboolean, GList, GHashTable + +#include // pcmk_resource_t, pcmk_scheduler_t + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API for nodes + * \ingroup core + */ + +// Special node attributes + +#define PCMK_NODE_ATTR_TERMINATE "terminate" + + +//! Possible node types +enum node_type { + pcmk_node_variant_cluster = 1, //!< Cluster layer node + pcmk_node_variant_remote = 2, //!< Pacemaker Remote node + + node_ping = 0, //!< \deprecated Do not use +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_node_variant_cluster instead + node_member = pcmk_node_variant_cluster, + + //! \deprecated Use pcmk_node_variant_remote instead + node_remote = pcmk_node_variant_remote, +#endif +}; + +//! When to probe a resource on a node (as specified in location constraints) +enum pe_discover_e { + pcmk_probe_always = 0, //! Always probe resource on node + pcmk_probe_never = 1, //! Never probe resource on node + pcmk_probe_exclusive = 2, //! Probe only on designated nodes + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_probe_always instead + pe_discover_always = pcmk_probe_always, + + //! \deprecated Use pcmk_probe_never instead + pe_discover_never = pcmk_probe_never, + + //! \deprecated Use pcmk_probe_exclusive instead + pe_discover_exclusive = pcmk_probe_exclusive, +#endif +}; + +//! 
Basic node information (all node objects for the same node share this) +struct pe_node_shared_s { + const char *id; //!< Node ID at the cluster layer + const char *uname; //!< Node name in cluster + enum node_type type; //!< Node variant + + // @TODO Convert these into a flag group + gboolean online; //!< Whether online + gboolean standby; //!< Whether in standby mode + gboolean standby_onfail; //!< Whether in standby mode due to on-fail + gboolean pending; //!< Whether controller membership is pending + gboolean unclean; //!< Whether node requires fencing + gboolean unseen; //!< Whether node has never joined cluster + gboolean shutdown; //!< Whether shutting down + gboolean expected_up; //!< Whether expected join state is member + gboolean is_dc; //!< Whether node is cluster's DC + gboolean maintenance; //!< Whether in maintenance mode + gboolean rsc_discovery_enabled; //!< Whether probes are allowed on node + + /*! + * Whether this is a guest node whose guest resource must be recovered or a + * remote node that must be fenced + */ + gboolean remote_requires_reset; + + /*! + * Whether this is a Pacemaker Remote node that was fenced since it was last + * connected by the cluster + */ + gboolean remote_was_fenced; + + /*! + * Whether this is a Pacemaker Remote node previously marked in its + * node state as being in maintenance mode + */ + gboolean remote_maintenance; + + gboolean unpacked; //!< Whether node history has been unpacked + + /*! + * Number of resources active on this node (valid after CIB status section + * has been unpacked, as long as pcmk_sched_no_counts was not set) + */ + int num_resources; + + //! Remote connection resource for node, if it is a Pacemaker Remote node + pcmk_resource_t *remote_rsc; + + GList *running_rsc; //!< List of resources active on node + GList *allocated_rsc; //!< List of resources assigned to node + GHashTable *attrs; //!< Node attributes + GHashTable *utilization; //!< Node utilization attributes + GHashTable *digest_cache; //!< Cache of calculated resource digests + + /*! + * Sum of priorities of all resources active on node and on any guest nodes + * connected to this node, with +1 for promoted instances (used to compare + * nodes for priority-fencing-delay) + */ + int priority; + + pcmk_scheduler_t *data_set; //!< Cluster that node is part of +}; + +//! 
Implementation of pcmk_node_t +struct pe_node_s { + int weight; //!< Node score for a given resource + gboolean fixed; //!< \deprecated Do not use + int count; //!< Counter reused by assignment and promotion code + struct pe_node_shared_s *details; //!< Basic node information + + // @COMPAT This should be enum pe_discover_e + int rsc_discover_mode; //!< Probe mode (enum pe_discover_e) +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_NODES__H diff --git a/include/crm/common/nvpair.h b/include/crm/common/nvpair.h index aebc199..185bdc3 100644 --- a/include/crm/common/nvpair.h +++ b/include/crm/common/nvpair.h @@ -46,7 +46,6 @@ void hash2smartfield(gpointer key, gpointer value, gpointer user_data); GHashTable *xml2list(const xmlNode *parent); const char *crm_xml_add(xmlNode *node, const char *name, const char *value); -const char *crm_xml_replace(xmlNode *node, const char *name, const char *value); const char *crm_xml_add_int(xmlNode *node, const char *name, int value); const char *crm_xml_add_ll(xmlNode *node, const char *name, long long value); const char *crm_xml_add_ms(xmlNode *node, const char *name, guint ms); diff --git a/include/crm/common/options_internal.h b/include/crm/common/options_internal.h index 4157b58..5c561fd 100644 --- a/include/crm/common/options_internal.h +++ b/include/crm/common/options_internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2006-2022 the Pacemaker project contributors + * Copyright 2006-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -26,7 +26,7 @@ _Noreturn void pcmk__cli_help(char cmd); */ const char *pcmk__env_option(const char *option); -void pcmk__set_env_option(const char *option, const char *value); +void pcmk__set_env_option(const char *option, const char *value, bool compat); bool pcmk__env_option_enabled(const char *daemon, const char *option); @@ -76,18 +76,52 @@ long pcmk__auto_watchdog_timeout(void); bool pcmk__valid_sbd_timeout(const char *value); // Constants for environment variable names +#define PCMK__ENV_AUTHKEY_LOCATION "authkey_location" #define PCMK__ENV_BLACKBOX "blackbox" +#define PCMK__ENV_CALLGRIND_ENABLED "callgrind_enabled" #define PCMK__ENV_CLUSTER_TYPE "cluster_type" #define PCMK__ENV_DEBUG "debug" +#define PCMK__ENV_DH_MAX_BITS "dh_max_bits" +#define PCMK__ENV_DH_MIN_BITS "dh_min_bits" +#define PCMK__ENV_FAIL_FAST "fail_fast" +#define PCMK__ENV_IPC_BUFFER "ipc_buffer" +#define PCMK__ENV_IPC_TYPE "ipc_type" #define PCMK__ENV_LOGFACILITY "logfacility" #define PCMK__ENV_LOGFILE "logfile" +#define PCMK__ENV_LOGFILE_MODE "logfile_mode" #define PCMK__ENV_LOGPRIORITY "logpriority" -#define PCMK__ENV_MCP "mcp" +#define PCMK__ENV_NODE_ACTION_LIMIT "node_action_limit" #define PCMK__ENV_NODE_START_STATE "node_start_state" +#define PCMK__ENV_PANIC_ACTION "panic_action" #define PCMK__ENV_PHYSICAL_HOST "physical_host" +#define PCMK__ENV_REMOTE_ADDRESS "remote_address" +#define PCMK__ENV_REMOTE_PID1 "remote_pid1" +#define PCMK__ENV_REMOTE_PORT "remote_port" +#define PCMK__ENV_RESPAWNED "respawned" +#define PCMK__ENV_SCHEMA_DIRECTORY "schema_directory" +#define PCMK__ENV_SERVICE "service" +#define PCMK__ENV_STDERR "stderr" +#define PCMK__ENV_TLS_PRIORITIES "tls_priorities" +#define PCMK__ENV_TRACE_BLACKBOX "trace_blackbox" +#define PCMK__ENV_TRACE_FILES "trace_files" +#define PCMK__ENV_TRACE_FORMATS "trace_formats" +#define PCMK__ENV_TRACE_FUNCTIONS "trace_functions" +#define PCMK__ENV_TRACE_TAGS "trace_tags" +#define PCMK__ENV_VALGRIND_ENABLED "valgrind_enabled" 
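The environment-option helpers touched in this hunk -- pcmk__env_option(), pcmk__env_option_enabled(), and the expanded set of PCMK__ENV_* constants -- might be exercised roughly as in the sketch below. The daemon name, the specific options queried, and the exact lookup semantics are assumptions for illustration; only the declarations themselves come from the patch.

    #include <stdio.h>
    #include <crm/common/options_internal.h>    // internal header, tree-only

    static void
    dump_env_settings(void)
    {
        // Presumably reads the corresponding PCMK_* environment variable
        const char *state = pcmk__env_option(PCMK__ENV_NODE_START_STATE);

        if (state != NULL) {
            printf("requested node start state: %s\n", state);
        }

        // Presumably true when the option enables stderr logging for this daemon
        if (pcmk__env_option_enabled("pacemakerd", PCMK__ENV_STDERR)) {
            printf("stderr logging requested\n");
        }
    }
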
+ +// @COMPAT Drop at 3.0.0; default is plenty +#define PCMK__ENV_CIB_TIMEOUT "cib_timeout" + +// @COMPAT Drop at 3.0.0; likely last used in 1.1.24 +#define PCMK__ENV_MCP "mcp" + +// @COMPAT Drop at 3.0.0; added unused in 1.1.9 #define PCMK__ENV_QUORUM_TYPE "quorum_type" + +/* @COMPAT Drop at 3.0.0; added to debug shutdown issues when Pacemaker is + * managed by systemd, but no longer useful. + */ #define PCMK__ENV_SHUTDOWN_DELAY "shutdown_delay" -#define PCMK__ENV_STDERR "stderr" // Constants for cluster option names #define PCMK__OPT_NODE_HEALTH_BASE "node-health-base" diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h index e7b631e..274bd85 100644 --- a/include/crm/common/output_internal.h +++ b/include/crm/common/output_internal.h @@ -763,6 +763,11 @@ pcmk__output_get_log_level(const pcmk__output_t *out); void pcmk__output_set_log_level(pcmk__output_t *out, uint8_t log_level); +void pcmk__output_set_log_filter(pcmk__output_t *out, const char *file, + const char *function, uint32_t line, + uint32_t tags); + + /*! * \internal * \brief Create and return a new XML node with the given name, as a child of the diff --git a/include/crm/common/remote_internal.h b/include/crm/common/remote_internal.h index 8473668..030c7a4 100644 --- a/include/crm/common/remote_internal.h +++ b/include/crm/common/remote_internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2008-2022 the Pacemaker project contributors + * Copyright 2008-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,7 @@ typedef struct pcmk__remote_s pcmk__remote_t; -int pcmk__remote_send_xml(pcmk__remote_t *remote, xmlNode *msg); +int pcmk__remote_send_xml(pcmk__remote_t *remote, const xmlNode *msg); int pcmk__remote_ready(const pcmk__remote_t *remote, int timeout_ms); int pcmk__read_remote_message(pcmk__remote_t *remote, int timeout_ms); xmlNode *pcmk__remote_message_xml(pcmk__remote_t *remote); diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h new file mode 100644 index 0000000..043dc1c --- /dev/null +++ b/include/crm/common/resources.h @@ -0,0 +1,502 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_RESOURCES__H +# define PCMK__CRM_COMMON_RESOURCES__H + +#include // time_t +#include // xmlNode +#include // gboolean, guint, GList, GHashTable + +#include // enum rsc_role_e +#include // pcmk_resource_t, etc. + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API for resources + * \ingroup core + */ + +//! Resource variants supported by Pacemaker +enum pe_obj_types { + // Order matters: some code compares greater or lesser than + pcmk_rsc_variant_unknown = -1, //!< Unknown resource variant + pcmk_rsc_variant_primitive = 0, //!< Primitive resource + pcmk_rsc_variant_group = 1, //!< Group resource + pcmk_rsc_variant_clone = 2, //!< Clone resource + pcmk_rsc_variant_bundle = 3, //!< Bundle resource + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_rsc_variant_unknown instead + pe_unknown = pcmk_rsc_variant_unknown, + + //! \deprecated Use pcmk_rsc_variant_primitive instead + pe_native = pcmk_rsc_variant_primitive, + + //! 
\deprecated Use pcmk_rsc_variant_group instead + pe_group = pcmk_rsc_variant_group, + + //! \deprecated Use pcmk_rsc_variant_clone instead + pe_clone = pcmk_rsc_variant_clone, + + //! \deprecated Use pcmk_rsc_variant_bundle instead + pe_container = pcmk_rsc_variant_bundle, +#endif +}; + +//! What resource needs before it can be recovered from a failed node +enum rsc_start_requirement { + pcmk_requires_nothing = 0, //!< Resource can be recovered immediately + pcmk_requires_quorum = 1, //!< Resource can be recovered if quorate + pcmk_requires_fencing = 2, //!< Resource can be recovered after fencing + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_requires_nothing instead + rsc_req_nothing = pcmk_requires_nothing, + + //! \deprecated Use pcmk_requires_quorum instead + rsc_req_quorum = pcmk_requires_quorum, + + //! \deprecated Use pcmk_requires_fencing instead + rsc_req_stonith = pcmk_requires_fencing, +#endif +}; + +//! How to recover a resource that is incorrectly active on multiple nodes +enum rsc_recovery_type { + pcmk_multiply_active_restart = 0, //!< Stop on all, start on desired + pcmk_multiply_active_stop = 1, //!< Stop on all and leave stopped + pcmk_multiply_active_block = 2, //!< Do nothing to resource + pcmk_multiply_active_unexpected = 3, //!< Stop unexpected instances + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_multiply_active_restart instead + recovery_stop_start = pcmk_multiply_active_restart, + + //! \deprecated Use pcmk_multiply_active_stop instead + recovery_stop_only = pcmk_multiply_active_stop, + + //! \deprecated Use pcmk_multiply_active_block instead + recovery_block = pcmk_multiply_active_block, + + //! \deprecated Use pcmk_multiply_active_unexpected instead + recovery_stop_unexpected = pcmk_multiply_active_unexpected, +#endif +}; + +//! Resource scheduling flags +enum pcmk_rsc_flags { + //! No resource flags set (compare with equality rather than bit set) + pcmk_no_rsc_flags = 0ULL, + + //! Whether resource has been removed from the configuration + pcmk_rsc_removed = (1ULL << 0), + + //! Whether resource is managed + pcmk_rsc_managed = (1ULL << 1), + + //! Whether resource is blocked from further action + pcmk_rsc_blocked = (1ULL << 2), + + //! Whether resource has been removed but has a container + pcmk_rsc_removed_filler = (1ULL << 3), + + //! Whether resource has clone notifications enabled + pcmk_rsc_notify = (1ULL << 4), + + //! Whether resource is not an anonymous clone instance + pcmk_rsc_unique = (1ULL << 5), + + //! Whether resource's class is "stonith" + pcmk_rsc_fence_device = (1ULL << 6), + + //! Whether resource can be promoted and demoted + pcmk_rsc_promotable = (1ULL << 7), + + //! Whether resource has not yet been assigned to a node + pcmk_rsc_unassigned = (1ULL << 8), + + //! Whether resource is in the process of being assigned to a node + pcmk_rsc_assigning = (1ULL << 9), + + //! Whether resource is in the process of modifying allowed node scores + pcmk_rsc_updating_nodes = (1ULL << 10), + + //! Whether resource is in the process of scheduling actions to restart + pcmk_rsc_restarting = (1ULL << 11), + + //! Whether resource must be stopped (instead of demoted) if it is failed + pcmk_rsc_stop_if_failed = (1ULL << 12), + + //! Whether a reload action has been scheduled for resource + pcmk_rsc_reload = (1ULL << 13), + + //! Whether resource is a remote connection allowed to run on a remote node + pcmk_rsc_remote_nesting_allowed = (1ULL << 14), + + //! 
Whether resource has "critical" meta-attribute enabled + pcmk_rsc_critical = (1ULL << 15), + + //! Whether resource is considered failed + pcmk_rsc_failed = (1ULL << 16), + + //! Flag for non-scheduler code to use to detect recursion loops + pcmk_rsc_detect_loop = (1ULL << 17), + + //! \deprecated Do not use + pcmk_rsc_runnable = (1ULL << 18), + + //! Whether resource has pending start action in history + pcmk_rsc_start_pending = (1ULL << 19), + + //! \deprecated Do not use + pcmk_rsc_starting = (1ULL << 20), + + //! \deprecated Do not use + pcmk_rsc_stopping = (1ULL << 21), + + //! Whether resource is multiply active with recovery set to stop_unexpected + pcmk_rsc_stop_unexpected = (1ULL << 22), + + //! Whether resource is allowed to live-migrate + pcmk_rsc_migratable = (1ULL << 23), + + //! Whether resource has an ignorable failure + pcmk_rsc_ignore_failure = (1ULL << 24), + + //! Whether resource is an implicit container resource for a bundle replica + pcmk_rsc_replica_container = (1ULL << 25), + + //! Whether resource, its node, or entire cluster is in maintenance mode + pcmk_rsc_maintenance = (1ULL << 26), + + //! \deprecated Do not use + pcmk_rsc_has_filler = (1ULL << 27), + + //! Whether resource can be started or promoted only on quorate nodes + pcmk_rsc_needs_quorum = (1ULL << 28), + + //! Whether resource requires fencing before recovery if on unclean node + pcmk_rsc_needs_fencing = (1ULL << 29), + + //! Whether resource can be started or promoted only on unfenced nodes + pcmk_rsc_needs_unfencing = (1ULL << 30), +}; + +//! Search options for resources (exact resource ID always matches) +enum pe_find { + //! Also match clone instance ID from resource history + pcmk_rsc_match_history = (1 << 0), + + //! Also match anonymous clone instances by base name + pcmk_rsc_match_anon_basename = (1 << 1), + + //! Match only clones and their instances, by either clone or instance ID + pcmk_rsc_match_clone_only = (1 << 2), + + //! If matching by node, compare current node instead of assigned node + pcmk_rsc_match_current_node = (1 << 3), + + //! \deprecated Do not use + pe_find_inactive = (1 << 4), + + //! Match clone instances (even unique) by base name as well as exact ID + pcmk_rsc_match_basename = (1 << 5), + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_rsc_match_history instead + pe_find_renamed = pcmk_rsc_match_history, + + //! \deprecated Use pcmk_rsc_match_anon_basename instead + pe_find_anon = pcmk_rsc_match_anon_basename, + + //! \deprecated Use pcmk_rsc_match_clone_only instead + pe_find_clone = pcmk_rsc_match_clone_only, + + //! \deprecated Use pcmk_rsc_match_current_node instead + pe_find_current = pcmk_rsc_match_current_node, + + //! \deprecated Use pcmk_rsc_match_basename instead + pe_find_any = pcmk_rsc_match_basename, +#endif +}; + +//!@{ +//! 
\deprecated Do not use +enum pe_restart { + pe_restart_restart, + pe_restart_ignore, +}; + +enum pe_print_options { + pe_print_log = (1 << 0), + pe_print_html = (1 << 1), + pe_print_ncurses = (1 << 2), + pe_print_printf = (1 << 3), + pe_print_dev = (1 << 4), // Ignored + pe_print_details = (1 << 5), // Ignored + pe_print_max_details = (1 << 6), // Ignored + pe_print_rsconly = (1 << 7), + pe_print_ops = (1 << 8), + pe_print_suppres_nl = (1 << 9), + pe_print_xml = (1 << 10), + pe_print_brief = (1 << 11), + pe_print_pending = (1 << 12), + pe_print_clone_details = (1 << 13), + pe_print_clone_active = (1 << 14), // Print clone instances only if active + pe_print_implicit = (1 << 15) // Print implicitly created resources +}; +//!@} + +// Resource assignment methods (implementation defined by libpacemaker) +//! This type should be considered internal to Pacemaker +typedef struct resource_alloc_functions_s pcmk_assignment_methods_t; + +//! Resource object methods +typedef struct resource_object_functions_s { + /*! + * \brief Parse variant-specific resource XML from CIB into struct members + * + * \param[in,out] rsc Partially unpacked resource + * \param[in,out] scheduler Scheduler data + * + * \return TRUE if resource was unpacked successfully, otherwise FALSE + */ + gboolean (*unpack)(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); + + /*! + * \brief Search for a resource ID in a resource and its children + * + * \param[in] rsc Search this resource and its children + * \param[in] id Search for this resource ID + * \param[in] on_node If not NULL, limit search to resources on this node + * \param[in] flags Group of enum pe_find flags + * + * \return Resource that matches search criteria if any, otherwise NULL + */ + pcmk_resource_t *(*find_rsc)(pcmk_resource_t *rsc, const char *search, + const pcmk_node_t *node, int flags); + + /*! + * \brief Get value of a resource instance attribute + * + * \param[in,out] rsc Resource to check + * \param[in] node Node to use to evaluate rules + * \param[in] create Ignored + * \param[in] name Name of instance attribute to check + * \param[in,out] scheduler Scheduler data + * + * \return Value of requested attribute if available, otherwise NULL + * \note The caller is responsible for freeing the result using free(). + */ + char *(*parameter)(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create, + const char *name, pcmk_scheduler_t *scheduler); + + //! \deprecated Do not use + void (*print)(pcmk_resource_t *rsc, const char *pre_text, long options, + void *print_data); + + /*! + * \brief Check whether a resource is active + * + * \param[in] rsc Resource to check + * \param[in] all If \p rsc is collective, all instances must be active + * + * \return TRUE if \p rsc is active, otherwise FALSE + */ + gboolean (*active)(pcmk_resource_t *rsc, gboolean all); + + /*! + * \brief Get resource's current or assigned role + * + * \param[in] rsc Resource to check + * \param[in] current If TRUE, check current role, otherwise assigned role + * + * \return Current or assigned role of \p rsc + */ + enum rsc_role_e (*state)(const pcmk_resource_t *rsc, gboolean current); + + /*! 
+ * \brief List nodes where a resource (or any of its children) is + * + * \param[in] rsc Resource to check + * \param[out] list List to add result to + * \param[in] current If 0, list nodes where \p rsc is assigned; + * if 1, where active; if 2, where active or pending + * + * \return If list contains only one node, that node, otherwise NULL + */ + pcmk_node_t *(*location)(const pcmk_resource_t *rsc, GList **list, + int current); + + /*! + * \brief Free all memory used by a resource + * + * \param[in,out] rsc Resource to free + */ + void (*free)(pcmk_resource_t *rsc); + + /*! + * \brief Increment cluster's instance counts for a resource + * + * Given a resource, increment its cluster's ninstances, disabled_resources, + * and blocked_resources counts for the resource and its descendants. + * + * \param[in,out] rsc Resource to count + */ + void (*count)(pcmk_resource_t *rsc); + + /*! + * \brief Check whether a given resource is in a list of resources + * + * \param[in] rsc Resource ID to check for + * \param[in] only_rsc List of resource IDs to check + * \param[in] check_parent If TRUE, check top ancestor as well + * + * \return TRUE if \p rsc, its top parent if requested, or '*' is in + * \p only_rsc, otherwise FALSE + */ + gboolean (*is_filtered)(const pcmk_resource_t *rsc, GList *only_rsc, + gboolean check_parent); + + /*! + * \brief Find a node (and optionally count all) where resource is active + * + * \param[in] rsc Resource to check + * \param[out] count_all If not NULL, set this to count of active nodes + * \param[out] count_clean If not NULL, set this to count of clean nodes + * + * \return A node where the resource is active, preferring the source node + * if the resource is involved in a partial migration, or a clean, + * online node if the resource's "requires" is "quorum" or + * "nothing", otherwise NULL. + */ + pcmk_node_t *(*active_node)(const pcmk_resource_t *rsc, + unsigned int *count_all, + unsigned int *count_clean); + + /*! + * \brief Get maximum resource instances per node + * + * \param[in] rsc Resource to check + * + * \return Maximum number of \p rsc instances that can be active on one node + */ + unsigned int (*max_per_node)(const pcmk_resource_t *rsc); +} pcmk_rsc_methods_t; + +//! Implementation of pcmk_resource_t +struct pe_resource_s { + char *id; //!< Resource ID in configuration + char *clone_name; //!< Resource instance ID in history + + //! Resource configuration (possibly expanded from template) + xmlNode *xml; + + //! Original resource configuration, if using template + xmlNode *orig_xml; + + //! 
Configuration of resource operations (possibly expanded from template) + xmlNode *ops_xml; + + pcmk_scheduler_t *cluster; //!< Cluster that resource is part of + pcmk_resource_t *parent; //!< Resource's parent resource, if any + enum pe_obj_types variant; //!< Resource variant + void *variant_opaque; //!< Variant-specific (and private) data + pcmk_rsc_methods_t *fns; //!< Resource object methods + pcmk_assignment_methods_t *cmds; //!< Resource assignment methods + + enum rsc_recovery_type recovery_type; //!< How to recover if failed + + enum pe_restart restart_type; //!< \deprecated Do not use + int priority; //!< Configured priority + int stickiness; //!< Extra preference for current node + int sort_index; //!< Promotion score on assigned node + int failure_timeout; //!< Failure timeout + int migration_threshold; //!< Migration threshold + guint remote_reconnect_ms; //!< Retry interval for remote connections + char *pending_task; //!< Pending action in history, if any + unsigned long long flags; //!< Group of enum pcmk_rsc_flags + + // @TODO Merge these into flags + gboolean is_remote_node; //!< Whether this is a remote connection + gboolean exclusive_discover; //!< Whether exclusive probing is enabled + + /* Pay special attention to whether you want to use rsc_cons_lhs and + * rsc_cons directly, which include only colocations explicitly involving + * this resource, or call libpacemaker's pcmk__with_this_colocations() and + * pcmk__this_with_colocations() functions, which may return relevant + * colocations involving the resource's ancestors as well. + */ + + //!@{ + //! This field should be treated as internal to Pacemaker + GList *rsc_cons_lhs; // Colocations of other resources with this one + GList *rsc_cons; // Colocations of this resource with others + GList *rsc_location; // Location constraints for resource + GList *actions; // Actions scheduled for resource + GList *rsc_tickets; // Ticket constraints for resource + //!@} + + pcmk_node_t *allocated_to; //!< Node resource is assigned to + + //! The destination node, if migrate_to completed but migrate_from has not + pcmk_node_t *partial_migration_target; + + //! The source node, if migrate_to completed but migrate_from has not + pcmk_node_t *partial_migration_source; + + //! Nodes where resource may be active + GList *running_on; + + //! Nodes where resource has been probed (key is node ID, not name) + GHashTable *known_on; + + //! Nodes where resource may run (key is node ID, not name) + GHashTable *allowed_nodes; + + enum rsc_role_e role; //!< Resource's current role + enum rsc_role_e next_role; //!< Resource's scheduled next role + + GHashTable *meta; //!< Resource's meta-attributes + GHashTable *parameters; //!< \deprecated Use pe_rsc_params() instead + GHashTable *utilization; //!< Resource's utilization attributes + + GList *children; //!< Resource's child resources, if any + + // Source nodes where stop is needed after migrate_from and migrate_to + GList *dangling_migrations; + + pcmk_resource_t *container; //!< Resource containing this one, if any + GList *fillers; //!< Resources contained by this one, if any + + // @COMPAT These should be made const at next API compatibility break + pcmk_node_t *pending_node; //!< Node on which pending_task is happening + pcmk_node_t *lock_node; //!< Resource shutdown-locked to this node + + time_t lock_time; //!< When shutdown lock started + + /*! + * Resource parameters may have node-attribute-based rules, which means the + * values can vary by node. 
This table has node names as keys and parameter + * name/value tables as values. Use pe_rsc_params() to get the table for a + * given node rather than use this directly. + */ + GHashTable *parameter_cache; +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_RESOURCES__H diff --git a/include/crm/common/results.h b/include/crm/common/results.h index 224bcbe..87d00d2 100644 --- a/include/crm/common/results.h +++ b/include/crm/common/results.h @@ -108,6 +108,9 @@ enum pcmk_rc_e { /* When adding new values, use consecutively lower numbers, update the array * in lib/common/results.c, and test with crm_error. */ + pcmk_rc_compression = -1039, + pcmk_rc_ns_resolution = -1038, + pcmk_rc_no_transaction = -1037, pcmk_rc_bad_xml_patch = -1036, pcmk_rc_bad_input = -1035, pcmk_rc_disabled = -1034, @@ -360,7 +363,6 @@ int pcmk_rc2legacy(int rc); int pcmk_legacy2rc(int legacy_rc); const char *pcmk_strerror(int rc); const char *pcmk_errorname(int rc); -const char *bz2_strerror(int rc); const char *crm_exit_name(crm_exit_t exit_code); const char *crm_exit_str(crm_exit_t exit_code); _Noreturn crm_exit_t crm_exit(crm_exit_t rc); diff --git a/include/crm/common/results_compat.h b/include/crm/common/results_compat.h index 00ac6b2..278e48e 100644 --- a/include/crm/common/results_compat.h +++ b/include/crm/common/results_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2022 the Pacemaker project contributors + * Copyright 2022-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -25,6 +25,9 @@ extern "C" { * release. */ +//! \deprecated Do not use +const char *bz2_strerror(int rc); + //! \deprecated Use pcmk_rc2exitc(pcmk_legacy2rc(rc)) instead crm_exit_t crm_errno2exit(int rc); diff --git a/include/crm/common/results_internal.h b/include/crm/common/results_internal.h index be62780..09907e9 100644 --- a/include/crm/common/results_internal.h +++ b/include/crm/common/results_internal.h @@ -69,6 +69,9 @@ void pcmk__reset_result(pcmk__action_result_t *result); void pcmk__copy_result(const pcmk__action_result_t *src, pcmk__action_result_t *dst); +int pcmk__gaierror2rc(int gai); +int pcmk__bzlib2rc(int bz2); + /*! * \internal * \brief Check whether a result is OK diff --git a/include/crm/common/roles.h b/include/crm/common/roles.h new file mode 100644 index 0000000..1498097 --- /dev/null +++ b/include/crm/common/roles.h @@ -0,0 +1,62 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_ROLES__H +# define PCMK__CRM_COMMON_ROLES__H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API for resource roles + * \ingroup core + */ + +/*! + * Possible roles that a resource can be in + * (order matters; values can be compared with less than and greater than) + */ +enum rsc_role_e { + pcmk_role_unknown = 0, //!< Resource role is unknown + pcmk_role_stopped = 1, //!< Stopped + pcmk_role_started = 2, //!< Started + pcmk_role_unpromoted = 3, //!< Unpromoted + pcmk_role_promoted = 4, //!< Promoted + +#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) + //! \deprecated Use pcmk_role_unknown instead + RSC_ROLE_UNKNOWN = pcmk_role_unknown, + + //! \deprecated Use pcmk_role_stopped instead + RSC_ROLE_STOPPED = pcmk_role_stopped, + + //! 
\deprecated Use pcmk_role_started instead + RSC_ROLE_STARTED = pcmk_role_started, + + //! \deprecated Use pcmk_role_unpromoted instead + RSC_ROLE_UNPROMOTED = pcmk_role_unpromoted, + + //! \deprecated Use pcmk_role_unpromoted instead + RSC_ROLE_SLAVE = pcmk_role_unpromoted, + + //! \deprecated Use pcmk_role_promoted instead + RSC_ROLE_PROMOTED = pcmk_role_promoted, + + //! \deprecated Use pcmk_role_promoted instead + RSC_ROLE_MASTER = pcmk_role_promoted, +#endif +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_ROLES__H diff --git a/include/crm/common/roles_internal.h b/include/crm/common/roles_internal.h new file mode 100644 index 0000000..e304f13 --- /dev/null +++ b/include/crm/common/roles_internal.h @@ -0,0 +1,30 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_ROLES_INTERNAL__H +# define PCMK__CRM_COMMON_ROLES_INTERNAL__H + +#ifdef __cplusplus +extern "C" { +#endif + +// String equivalents of enum rsc_role_e +#define PCMK__ROLE_UNKNOWN "Unknown" +#define PCMK__ROLE_STOPPED "Stopped" +#define PCMK__ROLE_STARTED "Started" +#define PCMK__ROLE_UNPROMOTED "Unpromoted" +#define PCMK__ROLE_PROMOTED "Promoted" +#define PCMK__ROLE_UNPROMOTED_LEGACY "Slave" +#define PCMK__ROLE_PROMOTED_LEGACY "Master" + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_ROLES_INTERNAL__H diff --git a/include/crm/common/scheduler.h b/include/crm/common/scheduler.h new file mode 100644 index 0000000..96f9a62 --- /dev/null +++ b/include/crm/common/scheduler.h @@ -0,0 +1,238 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_SCHEDULER__H +# define PCMK__CRM_COMMON_SCHEDULER__H + +#include // time_t +#include // xmlNode +#include // guint, GList, GHashTable + +#include // crm_time_t + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API + * \ingroup core + */ + +//! 
Possible responses to loss of quorum +enum pe_quorum_policy { + pcmk_no_quorum_freeze, // pcmk_tag_t *) + int blocked_resources; //!< Number of blocked resources in cluster + int disabled_resources; //!< Number of disabled resources in cluster + GList *param_check; //!< History entries that need to be checked + GList *stop_needed; //!< Containers that need stop actions + time_t recheck_by; //!< Hint to controller when to reschedule + int ninstances; //!< Total number of resource instances + guint shutdown_lock; //!< How long to lock resources (seconds) + int priority_fencing_delay; //!< Priority fencing delay + + // pcmk__output_t * + void *priv; //!< For Pacemaker use only + + guint node_pending_timeout; //!< Pending join times out after this (ms) +}; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_SCHEDULER__H diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h new file mode 100644 index 0000000..1f1da9f --- /dev/null +++ b/include/crm/common/scheduler_internal.h @@ -0,0 +1,67 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H +# define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Some warnings are too noisy when logged every time a give function is called + * (for example, using a deprecated feature). As an alternative, we allow + * warnings to be logged once per scheduler sequence (transition). Each of those + * warnings needs a flag defined here. + */ +enum pcmk__sched_warnings { + pcmk__wo_blind = (1 << 0), + pcmk__wo_restart_type = (1 << 1), + pcmk__wo_role_after = (1 << 2), + pcmk__wo_poweroff = (1 << 3), + pcmk__wo_require_all = (1 << 4), + pcmk__wo_order_score = (1 << 5), + pcmk__wo_neg_threshold = (1 << 6), + pcmk__wo_remove_after = (1 << 7), + pcmk__wo_ping_node = (1 << 8), + pcmk__wo_order_inst = (1 << 9), + pcmk__wo_coloc_inst = (1 << 10), + pcmk__wo_group_order = (1 << 11), + pcmk__wo_group_coloc = (1 << 12), + pcmk__wo_upstart = (1 << 13), + pcmk__wo_nagios = (1 << 14), + pcmk__wo_set_ordering = (1 << 15), +}; + +enum pcmk__check_parameters { + /* Clear fail count if parameters changed for un-expired start or monitor + * last_failure. + */ + pcmk__check_last_failure, + + /* Clear fail count if parameters changed for start, monitor, promote, or + * migrate_from actions for active resources. + */ + pcmk__check_active, +}; + +// Group of enum pcmk__sched_warnings flags for warnings we want to log once +extern uint32_t pcmk__warnings; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H diff --git a/include/crm/common/scheduler_types.h b/include/crm/common/scheduler_types.h new file mode 100644 index 0000000..5c4a193 --- /dev/null +++ b/include/crm/common/scheduler_types.h @@ -0,0 +1,39 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_SCHEDULER_TYPES__H +# define PCMK__CRM_COMMON_SCHEDULER_TYPES__H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! 
+ * \file + * \brief Type aliases needed to define scheduler objects + * \ingroup core + */ + +//! Node object (including information that may vary depending on resource) +typedef struct pe_node_s pcmk_node_t; + +//! Resource object +typedef struct pe_resource_s pcmk_resource_t; + +//! Action object +typedef struct pe_action_s pcmk_action_t; + +//! Scheduler object +typedef struct pe_working_set_s pcmk_scheduler_t; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_SCHEDULER_TYPES__H diff --git a/include/crm/common/tags.h b/include/crm/common/tags.h new file mode 100644 index 0000000..3f4861d --- /dev/null +++ b/include/crm/common/tags.h @@ -0,0 +1,35 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_TAGS__H +# define PCMK__CRM_COMMON_TAGS__H + +#include // GList + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API for configuration tags + * \ingroup core + */ + +//! Configuration tag object +typedef struct pe_tag_s { + char *id; //!< XML ID of tag + GList *refs; //!< XML IDs of objects that reference the tag +} pcmk_tag_t; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_TAGS__H diff --git a/include/crm/common/tickets.h b/include/crm/common/tickets.h new file mode 100644 index 0000000..40079e9 --- /dev/null +++ b/include/crm/common/tickets.h @@ -0,0 +1,39 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_COMMON_TICKETS__H +# define PCMK__CRM_COMMON_TICKETS__H + +#include // time_t +#include // gboolean, GHashTable + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \file + * \brief Scheduler API for tickets + * \ingroup core + */ + +//! Ticket constraint object +typedef struct pe_ticket_s { + char *id; //!< XML ID of ticket constraint or state + gboolean granted; //!< Whether cluster has been granted the ticket + time_t last_granted; //!< When cluster was last granted the ticket + gboolean standby; //!< Whether ticket is temporarily suspended + GHashTable *state; //!< XML attributes from ticket state +} pcmk_ticket_t; + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_COMMON_TICKETS__H diff --git a/include/crm/common/unittest_internal.h b/include/crm/common/unittest_internal.h index b8f78cf..1fc8501 100644 --- a/include/crm/common/unittest_internal.h +++ b/include/crm/common/unittest_internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2022 the Pacemaker project contributors + * Copyright 2022-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -65,6 +65,44 @@ } \ } while (0); +/*! + * \internal + * \brief Assert that a statement exits with the expected exit status. + * + * \param[in] stmt Statement to execute; can be an expression. + * \param[in] rc The expected exit status. + * + * This functions just like \c pcmk__assert_asserts, except that it tests for + * an expected exit status. Abnormal termination or incorrect exit status is + * treated as a failure of the test. 
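A hedged sketch of how the pcmk__assert_exits() macro documented here (and defined just below) might appear in a cmocka-based unit test. The test function and the use of the PCMK__UNIT_TEST driver macro are assumptions; crm_exit() and CRM_EX_USAGE are existing Pacemaker symbols.

    #include <crm/common/unittest_internal.h>

    static void
    exits_with_usage_error(void **state)
    {
        (void) state;
        // The forked child should terminate via crm_exit() with CRM_EX_USAGE
        pcmk__assert_exits(CRM_EX_USAGE, crm_exit(CRM_EX_USAGE));
    }

    PCMK__UNIT_TEST(NULL, NULL,
                    cmocka_unit_test(exits_with_usage_error))
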
+ * + * In the event that stmt does not exit at all, the special code \c CRM_EX_NONE + * will be returned. It is expected that this code is not used anywhere, thus + * always causing an error. + */ +#define pcmk__assert_exits(rc, stmt) \ + do { \ + pid_t p = fork(); \ + if (p == 0) { \ + struct rlimit cores = { 0, 0 }; \ + setrlimit(RLIMIT_CORE, &cores); \ + stmt; \ + _exit(CRM_EX_NONE); \ + } else if (p > 0) { \ + int wstatus = 0; \ + if (waitpid(p, &wstatus, 0) == -1) { \ + fail_msg("waitpid failed"); \ + } \ + if (!WIFEXITED(wstatus)) { \ + fail_msg("statement terminated abnormally"); \ + } else if (WEXITSTATUS(wstatus) != rc) { \ + fail_msg("statement exited with %d, not expected %d", WEXITSTATUS(wstatus), rc); \ + } \ + } else { \ + fail_msg("unable to fork for assert test"); \ + } \ + } while (0); + /* Generate the main function of most unit test files. Typically, group_setup * and group_teardown will be NULL. The rest of the arguments are a list of * calls to cmocka_unit_test or cmocka_unit_test_setup_teardown to run the diff --git a/include/crm/common/util.h b/include/crm/common/util.h index 8acdff9..c75a55e 100644 --- a/include/crm/common/util.h +++ b/include/crm/common/util.h @@ -18,10 +18,8 @@ # include # include -# include - -# include # include +# include # include # include @@ -59,26 +57,6 @@ char *crm_strdup_printf(char const *format, ...) G_GNUC_PRINTF(1, 2); guint crm_parse_interval_spec(const char *input); -/* public operation functions (from operations.c) */ -gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, - guint *interval_ms); -gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, - int *action_id, int *target_rc); -gboolean decode_transition_magic(const char *magic, char **uuid, - int *transition_id, int *action_id, - int *op_status, int *op_rc, int *target_rc); -int rsc_op_expected_rc(const lrmd_event_data_t *event); -gboolean did_rsc_op_fail(lrmd_event_data_t *event, int target_rc); -bool crm_op_needs_metadata(const char *rsc_class, const char *op); -xmlNode *crm_create_op_xml(xmlNode *parent, const char *prefix, - const char *task, const char *interval_spec, - const char *timeout); -#define CRM_DEFAULT_OP_TIMEOUT_S "20s" - -bool pcmk_is_probe(const char *task, guint interval); -bool pcmk_xe_is_probe(const xmlNode *xml_op); -bool pcmk_xe_mask_probe_failure(const xmlNode *xml_op); - int compare_version(const char *version1, const char *version2); /* coverity[+kill] */ diff --git a/include/crm/common/util_compat.h b/include/crm/common/util_compat.h index 9e02e12..7a60208 100644 --- a/include/crm/common/util_compat.h +++ b/include/crm/common/util_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,6 +11,7 @@ # define PCMK__CRM_COMMON_UTIL_COMPAT__H # include +# include # include #ifdef __cplusplus @@ -29,6 +30,9 @@ extern "C" { //! \deprecated Use crm_parse_interval_spec() instead #define crm_get_interval crm_parse_interval_spec +//! \deprecated Do not use +#define CRM_DEFAULT_OP_TIMEOUT_S "20s" + //! \deprecated Use !pcmk_is_set() or !pcmk_all_flags_set() instead static inline gboolean is_not_set(long long word, long long bit) @@ -69,6 +73,9 @@ int pcmk_scan_nvpair(const char *input, char **name, char **value); char *pcmk_format_nvpair(const char *name, const char *value, const char *units); +//! 
\deprecated Use \c crm_xml_add() or \c xml_remove_prop() instead +const char *crm_xml_replace(xmlNode *node, const char *name, const char *value); + //! \deprecated Use a standard printf()-style function instead char *pcmk_format_named_time(const char *name, time_t epoch_time); diff --git a/include/crm/common/xml.h b/include/crm/common/xml.h index 682b31c..ac839d3 100644 --- a/include/crm/common/xml.h +++ b/include/crm/common/xml.h @@ -52,8 +52,6 @@ typedef const xmlChar *pcmkXmlStr; gboolean add_message_xml(xmlNode * msg, const char *field, xmlNode * xml); xmlNode *get_message_xml(const xmlNode *msg, const char *field); -xmlDoc *getDocPtr(xmlNode * node); - /* * \brief xmlCopyPropList ACLs-sensitive replacement expading i++ notation * @@ -132,12 +130,13 @@ xmlNode *stdin2xml(void); xmlNode *string2xml(const char *input); -int write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress); -int write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress); +int write_xml_fd(const xmlNode *xml, const char *filename, int fd, + gboolean compress); +int write_xml_file(const xmlNode *xml, const char *filename, gboolean compress); -char *dump_xml_formatted(xmlNode * msg); -char *dump_xml_formatted_with_text(xmlNode * msg); -char *dump_xml_unformatted(xmlNode * msg); +char *dump_xml_formatted(const xmlNode *xml); +char *dump_xml_formatted_with_text(const xmlNode *xml); +char *dump_xml_unformatted(const xmlNode *xml); /* * Diff related Functions @@ -169,26 +168,18 @@ int find_xml_children(xmlNode ** children, xmlNode * root, xmlNode *get_xpath_object(const char *xpath, xmlNode * xml_obj, int error_level); xmlNode *get_xpath_object_relative(const char *xpath, xmlNode * xml_obj, int error_level); -static inline const char * -crm_element_name(const xmlNode *xml) -{ - return xml? (const char *)(xml->name) : NULL; -} - static inline const char * crm_map_element_name(const xmlNode *xml) { - const char *name = crm_element_name(xml); - - if (strcmp(name, "master") == 0) { + if (xml == NULL) { + return NULL; + } else if (strcmp((const char *) xml->name, "master") == 0) { return "clone"; } else { - return name; + return (const char *) xml->name; } } -gboolean xml_has_children(const xmlNode * root); - char *calculate_on_disk_digest(xmlNode * local_cib); char *calculate_operation_digest(xmlNode * local_cib, const char *version); char *calculate_xml_versioned_digest(xmlNode * input, gboolean sort, gboolean do_filter, @@ -196,7 +187,7 @@ char *calculate_xml_versioned_digest(xmlNode * input, gboolean sort, gboolean do /* schema-related functions (from schemas.c) */ gboolean validate_xml(xmlNode * xml_blob, const char *validation, gboolean to_logs); -gboolean validate_xml_verbose(xmlNode * xml_blob); +gboolean validate_xml_verbose(const xmlNode *xml_blob); /*! 
* \brief Update CIB XML to most recent schema version @@ -258,7 +249,7 @@ xmlNode *first_named_child(const xmlNode *parent, const char *name); xmlNode *crm_next_same_xml(const xmlNode *sibling); xmlNode *sorted_xml(xmlNode * input, xmlNode * parent, gboolean recursive); -xmlXPathObjectPtr xpath_search(xmlNode * xml_top, const char *path); +xmlXPathObjectPtr xpath_search(const xmlNode *xml_top, const char *path); void crm_foreach_xpath_result(xmlNode *xml, const char *xpath, void (*helper)(xmlNode*, void*), void *user_data); xmlNode *expand_idref(xmlNode * input, xmlNode * top); @@ -289,7 +280,8 @@ int xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version); void patchset_process_digest(xmlNode *patch, xmlNode *source, xmlNode *target, bool with_digest); -void save_xml_to_file(xmlNode * xml, const char *desc, const char *filename); +void save_xml_to_file(const xmlNode *xml, const char *desc, + const char *filename); char * crm_xml_escape(const char *text); void crm_xml_sanitize_id(char *id); diff --git a/include/crm/common/xml_compat.h b/include/crm/common/xml_compat.h index bb49b68..85e39ff 100644 --- a/include/crm/common/xml_compat.h +++ b/include/crm/common/xml_compat.h @@ -30,6 +30,9 @@ extern "C" { //! \deprecated Do not use (will be removed in a future release) #define XML_PARANOIA_CHECKS 0 +//! \deprecated This function will be removed in a future release +xmlDoc *getDocPtr(xmlNode *node); + //! \deprecated This function will be removed in a future release int add_node_nocopy(xmlNode * parent, const char *name, xmlNode * child); @@ -51,13 +54,23 @@ gboolean apply_xml_diff(xmlNode *old_xml, xmlNode *diff, xmlNode **new_xml); //! \deprecated Do not use (will be removed in a future release) void crm_destroy_xml(gpointer data); -//! \deprecated Use crm_xml_add() with "true" or "false" instead +//! \deprecated Check children member directly +gboolean xml_has_children(const xmlNode *root); + +//! \deprecated Use crm_xml_add() with "true" or "false" instead static inline const char * crm_xml_add_boolean(xmlNode *node, const char *name, gboolean value) { return crm_xml_add(node, name, (value? "true" : "false")); } +//! \deprecated Use name member directly +static inline const char * +crm_element_name(const xmlNode *xml) +{ + return (xml == NULL)? NULL : (const char *) xml->name; +} + #ifdef __cplusplus } #endif diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h index 43b3b8c..ddb4384 100644 --- a/include/crm/common/xml_internal.h +++ b/include/crm/common/xml_internal.h @@ -21,6 +21,7 @@ # include /* transitively imports qblog.h */ # include +# include /*! * \brief Base for directing lib{xml2,xslt} log into standard libqb backend @@ -135,9 +136,6 @@ enum pcmk__xml_fmt_options { //! Include indentation and newlines pcmk__xml_fmt_pretty = (1 << 1), - //! Include full XML subtree (with any text), using libxml serialization - pcmk__xml_fmt_full = (1 << 2), - //! Include the opening tag of an XML element, and include XML comments pcmk__xml_fmt_open = (1 << 3), @@ -147,7 +145,6 @@ enum pcmk__xml_fmt_options { //! Include the closing tag of an XML element pcmk__xml_fmt_close = (1 << 5), - // @COMPAT Remove when log_data_element() is removed //! Include XML text nodes pcmk__xml_fmt_text = (1 << 6), @@ -190,6 +187,16 @@ int pcmk__xml_show_changes(pcmk__output_t *out, const xmlNode *xml); #define PCMK__XP_REMOTE_NODE_STATUS \ "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \ "[@" XML_NODE_IS_REMOTE "='true']" +/*! 
+ * \internal + * \brief Serialize XML (using libxml) into provided descriptor + * + * \param[in] fd File descriptor to (piece-wise) write to + * \param[in] cur XML subtree to proceed + * + * \return a standard Pacemaker return code + */ +int pcmk__xml2fd(int fd, xmlNode *cur); enum pcmk__xml_artefact_ns { pcmk__xml_artefact_ns_legacy_rng = 1, @@ -233,6 +240,22 @@ pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns); char *pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns, const char *filespec); +/*! + * \internal + * \brief Check whether an XML element is of a particular type + * + * \param[in] xml XML element to compare + * \param[in] name XML element name to compare + * + * \return \c true if \p xml is of type \p name, otherwise \c false + */ +static inline bool +pcmk__xe_is(const xmlNode *xml, const char *name) +{ + return (xml != NULL) && (xml->name != NULL) && (name != NULL) + && (strcmp((const char *) xml->name, name) == 0); +} + /*! * \internal * \brief Return first non-text child node of an XML node @@ -411,4 +434,15 @@ pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name, int (*handler)(xmlNode *xml, void *userdata), void *userdata); +static inline const char * +pcmk__xml_attr_value(const xmlAttr *attr) +{ + return ((attr == NULL) || (attr->children == NULL))? NULL + : (const char *) attr->children->content; +} + +gboolean pcmk__validate_xml(xmlNode *xml_blob, const char *validation, + xmlRelaxNGValidityErrorFunc error_handler, + void *error_handler_context); + #endif // PCMK__XML_INTERNAL__H diff --git a/include/crm/compatibility.h b/include/crm/compatibility.h index 1281a3c..f8502cc 100644 --- a/include/crm/compatibility.h +++ b/include/crm/compatibility.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -196,41 +196,41 @@ extern "C" { /* Clone terminology definitions */ // These can no longer be used in a switch together -#define pe_master pe_clone +#define pe_master pcmk_rsc_variant_clone static inline enum pe_obj_types get_resource_type(const char *name) { if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) { - return pe_native; + return pcmk_rsc_variant_primitive; } else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) { - return pe_group; + return pcmk_rsc_variant_group; } else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION) || safe_str_eq(name, PCMK_XE_PROMOTABLE_LEGACY)) { - return pe_clone; + return pcmk_rsc_variant_clone; } else if (safe_str_eq(name, XML_CIB_TAG_CONTAINER)) { - return pe_container; + return pcmk_rsc_variant_bundle; } - return pe_unknown; + return pcmk_rsc_variant_unknown; } static inline const char * get_resource_typename(enum pe_obj_types type) { switch (type) { - case pe_native: + case pcmk_rsc_variant_primitive: return XML_CIB_TAG_RESOURCE; - case pe_group: + case pcmk_rsc_variant_group: return XML_CIB_TAG_GROUP; - case pe_clone: + case pcmk_rsc_variant_clone: return XML_CIB_TAG_INCARNATION; - case pe_container: + case pcmk_rsc_variant_bundle: return XML_CIB_TAG_CONTAINER; - case pe_unknown: + case pcmk_rsc_variant_unknown: return "unknown"; } return ""; diff --git a/include/crm/crm.h b/include/crm/crm.h index e824825..aecfcc8 100644 --- a/include/crm/crm.h +++ b/include/crm/crm.h @@ -65,8 +65,9 @@ extern "C" { * XML v2 patchsets are created by default * >=3.0.13: Fail counts include operation name and interval * >=3.2.0: DC supports PCMK_EXEC_INVALID and PCMK_EXEC_NOT_CONNECTED + * >=3.19.0: DC supports PCMK__CIB_REQUEST_COMMIT_TRANSACT */ -# define CRM_FEATURE_SET "3.17.4" +# define CRM_FEATURE_SET "3.19.0" /* Pacemaker's CPG protocols use fixed-width binary fields for the sender and * recipient of a CPG message. 
This imposes an arbitrary limit on cluster node @@ -79,8 +80,6 @@ extern "C" { extern char *crm_system_name; -/* *INDENT-OFF* */ - // How we represent "infinite" scores # define CRM_SCORE_INFINITY 1000000 # define CRM_INFINITY_S "INFINITY" @@ -110,6 +109,7 @@ extern char *crm_system_name; # define CRM_SYSTEM_MCP "pacemakerd" // Names of internally generated node attributes +// @TODO Replace these with PCMK_NODE_ATTR_* # define CRM_ATTR_UNAME "#uname" # define CRM_ATTR_ID "#id" # define CRM_ATTR_KIND "#kind" @@ -140,22 +140,19 @@ extern char *crm_system_name; # define CRM_OP_QUIT "quit" # define CRM_OP_LOCAL_SHUTDOWN "start_shutdown" # define CRM_OP_SHUTDOWN_REQ "req_shutdown" -# define CRM_OP_SHUTDOWN "do_shutdown" -# define CRM_OP_FENCE "stonith" +# define CRM_OP_SHUTDOWN PCMK_ACTION_DO_SHUTDOWN # define CRM_OP_REGISTER "register" # define CRM_OP_IPC_FWD "ipc_fwd" # define CRM_OP_INVOKE_LRM "lrm_invoke" # define CRM_OP_LRM_REFRESH "lrm_refresh" //!< Deprecated since 1.1.10 -# define CRM_OP_LRM_DELETE "lrm_delete" +# define CRM_OP_LRM_DELETE PCMK_ACTION_LRM_DELETE # define CRM_OP_LRM_FAIL "lrm_fail" # define CRM_OP_PROBED "probe_complete" # define CRM_OP_REPROBE "probe_again" -# define CRM_OP_CLEAR_FAILCOUNT "clear_failcount" +# define CRM_OP_CLEAR_FAILCOUNT PCMK_ACTION_CLEAR_FAILCOUNT # define CRM_OP_REMOTE_STATE "remote_state" -# define CRM_OP_RELAXED_SET "one-or-more" -# define CRM_OP_RELAXED_CLONE "clone-one-or-more" # define CRM_OP_RM_NODE_CACHE "rm_node_cache" -# define CRM_OP_MAINTENANCE_NODES "maintenance_nodes" +# define CRM_OP_MAINTENANCE_NODES PCMK_ACTION_MAINTENANCE_NODES /* Possible cluster membership states */ # define CRMD_JOINSTATE_DOWN "down" @@ -163,70 +160,11 @@ extern char *crm_system_name; # define CRMD_JOINSTATE_MEMBER "member" # define CRMD_JOINSTATE_NACK "banned" -# define CRMD_ACTION_DELETE "delete" -# define CRMD_ACTION_CANCEL "cancel" - -# define CRMD_ACTION_RELOAD "reload" -# define CRMD_ACTION_RELOAD_AGENT "reload-agent" -# define CRMD_ACTION_MIGRATE "migrate_to" -# define CRMD_ACTION_MIGRATED "migrate_from" - -# define CRMD_ACTION_START "start" -# define CRMD_ACTION_STARTED "running" - -# define CRMD_ACTION_STOP "stop" -# define CRMD_ACTION_STOPPED "stopped" - -# define CRMD_ACTION_PROMOTE "promote" -# define CRMD_ACTION_PROMOTED "promoted" -# define CRMD_ACTION_DEMOTE "demote" -# define CRMD_ACTION_DEMOTED "demoted" - -# define CRMD_ACTION_NOTIFY "notify" -# define CRMD_ACTION_NOTIFIED "notified" - -# define CRMD_ACTION_STATUS "monitor" -# define CRMD_ACTION_METADATA "meta-data" -# define CRMD_METADATA_CALL_TIMEOUT 30000 - -/* short names */ -# define RSC_DELETE CRMD_ACTION_DELETE -# define RSC_CANCEL CRMD_ACTION_CANCEL - -# define RSC_MIGRATE CRMD_ACTION_MIGRATE -# define RSC_MIGRATED CRMD_ACTION_MIGRATED - -# define RSC_START CRMD_ACTION_START -# define RSC_STARTED CRMD_ACTION_STARTED - -# define RSC_STOP CRMD_ACTION_STOP -# define RSC_STOPPED CRMD_ACTION_STOPPED - -# define RSC_PROMOTE CRMD_ACTION_PROMOTE -# define RSC_PROMOTED CRMD_ACTION_PROMOTED -# define RSC_DEMOTE CRMD_ACTION_DEMOTE -# define RSC_DEMOTED CRMD_ACTION_DEMOTED - -# define RSC_NOTIFY CRMD_ACTION_NOTIFY -# define RSC_NOTIFIED CRMD_ACTION_NOTIFIED - -# define RSC_STATUS CRMD_ACTION_STATUS -# define RSC_METADATA CRMD_ACTION_METADATA -/* *INDENT-ON* */ - +# include # include # include # include -static inline const char * -crm_action_str(const char *task, guint interval_ms) { - if ((task != NULL) && (interval_ms == 0) - && (strcasecmp(task, RSC_STATUS) == 0)) { - return "probe"; - } - return 
task; -} - #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) #include #endif diff --git a/include/crm/crm_compat.h b/include/crm/crm_compat.h index 2c0a3dd..bfe1098 100644 --- a/include/crm/crm_compat.h +++ b/include/crm/crm_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,8 +10,11 @@ #ifndef PCMK__CRM_CRM_COMPAT__H # define PCMK__CRM_CRM_COMPAT__H +#include #include +#include + #ifdef __cplusplus extern "C" { #endif @@ -31,12 +34,126 @@ extern "C" { //! \deprecated This defined constant will be removed in a future release #define MAX_IPC_DELAY 120 +//! \deprecated Use PCMK_ACTION_STONITH instead +#define CRM_OP_FENCE PCMK_ACTION_STONITH + //! \deprecated This defined constant will be removed in a future release #define CRM_OP_LRM_QUERY "lrm_query" +//! \deprecated Use PCMK_ACTION_CLONE_ONE_OR_MORE instead +#define CRM_OP_RELAXED_CLONE PCMK_ACTION_CLONE_ONE_OR_MORE + +//! \deprecated Use PCMK_ACTION_ONE_OR_MORE instead +#define CRM_OP_RELAXED_SET PCMK_ACTION_ONE_OR_MORE + //! \deprecated This defined constant will be removed in a future release #define CRM_ATTR_RA_VERSION "#ra-version" +//! \deprecated Use PCMK_ACTION_CANCEL instead +#define CRMD_ACTION_CANCEL PCMK_ACTION_CANCEL + +//! \deprecated Use PCMK_ACTION_DELETE instead +#define CRMD_ACTION_DELETE PCMK_ACTION_DELETE + +//! \deprecated Use PCMK_ACTION_DEMOTE instead +#define CRMD_ACTION_DEMOTE PCMK_ACTION_DEMOTE + +//! \deprecated Use PCMK_ACTION_DEMOTED instead +#define CRMD_ACTION_DEMOTED PCMK_ACTION_DEMOTED + +//! \deprecated Use PCMK_ACTION_META_DATA instead +#define CRMD_ACTION_METADATA PCMK_ACTION_META_DATA + +//! \deprecated Use PCMK_ACTION_MIGRATE_TO instead +#define CRMD_ACTION_MIGRATE PCMK_ACTION_MIGRATE_TO + +//! \deprecated Use PCMK_ACTION_MIGRATE_FROM instead +#define CRMD_ACTION_MIGRATED PCMK_ACTION_MIGRATE_FROM + +//! \deprecated Use PCMK_ACTION_NOTIFIED instead +#define CRMD_ACTION_NOTIFIED PCMK_ACTION_NOTIFIED + +//! \deprecated Use PCMK_ACTION_NOTIFY instead +#define CRMD_ACTION_NOTIFY PCMK_ACTION_NOTIFY + +//! \deprecated Use PCMK_ACTION_PROMOTE instead +#define CRMD_ACTION_PROMOTE PCMK_ACTION_PROMOTE + +//! \deprecated Use PCMK_ACTION_PROMOTED instead +#define CRMD_ACTION_PROMOTED PCMK_ACTION_PROMOTED + +//! \deprecated Use PCMK_ACTION_RELOAD instead +#define CRMD_ACTION_RELOAD PCMK_ACTION_RELOAD + +//! \deprecated Use PCMK_ACTION_RELOAD_AGENT instead +#define CRMD_ACTION_RELOAD_AGENT PCMK_ACTION_RELOAD_AGENT + +//! \deprecated Use PCMK_ACTION_START instead +#define CRMD_ACTION_START PCMK_ACTION_START + +//! \deprecated Use PCMK_ACTION_RUNNING instead +#define CRMD_ACTION_STARTED PCMK_ACTION_RUNNING + +//! \deprecated Use PCMK_ACTION_MONITOR instead +#define CRMD_ACTION_STATUS PCMK_ACTION_MONITOR + +//! \deprecated Use PCMK_ACTION_STOP instead +#define CRMD_ACTION_STOP PCMK_ACTION_STOP + +//! \deprecated Use PCMK_ACTION_STOPPED instead +#define CRMD_ACTION_STOPPED PCMK_ACTION_STOPPED + +//! \deprecated Do not use +#define CRMD_METADATA_CALL_TIMEOUT PCMK_DEFAULT_METADATA_TIMEOUT_MS + +//! \deprecated Use PCMK_ACTION_CANCEL instead +#define RSC_CANCEL PCMK_ACTION_CANCEL + +//! \deprecated Use PCMK_ACTION_DELETE instead +#define RSC_DELETE PCMK_ACTION_DELETE + +//! \deprecated Use PCMK_ACTION_DEMOTE instead +#define RSC_DEMOTE PCMK_ACTION_DEMOTE + +//! 
\deprecated Use PCMK_ACTION_DEMOTED instead +#define RSC_DEMOTED PCMK_ACTION_DEMOTED + +//! \deprecated Use PCMK_ACTION_META_DATA instead +#define RSC_METADATA PCMK_ACTION_META_DATA + +//! \deprecated Use PCMK_ACTION_MIGRATE_TO instead +#define RSC_MIGRATE PCMK_ACTION_MIGRATE_TO + +//! \deprecated Use PCMK_ACTION_MIGRATE_FROM instead +#define RSC_MIGRATED PCMK_ACTION_MIGRATE_FROM + +//! \deprecated Use PCMK_ACTION_NOTIFIED instead +#define RSC_NOTIFIED PCMK_ACTION_NOTIFIED + +//! \deprecated Use PCMK_ACTION_NOTIFY instead +#define RSC_NOTIFY PCMK_ACTION_NOTIFY + +//! \deprecated Use PCMK_ACTION_PROMOTE instead +#define RSC_PROMOTE PCMK_ACTION_PROMOTE + +//! \deprecated Use PCMK_ACTION_PROMOTED instead +#define RSC_PROMOTED PCMK_ACTION_PROMOTED + +//! \deprecated Use PCMK_ACTION_START instead +#define RSC_START PCMK_ACTION_START + +//! \deprecated Use PCMK_ACTION_RUNNING instead +#define RSC_STARTED PCMK_ACTION_RUNNING + +//! \deprecated Use PCMK_ACTION_MONITOR instead +#define RSC_STATUS PCMK_ACTION_MONITOR + +//! \deprecated Use PCMK_ACTION_STOP instead +#define RSC_STOP PCMK_ACTION_STOP + +//! \deprecated Use PCMK_ACTION_STOPPED instead +#define RSC_STOPPED PCMK_ACTION_STOPPED + //!@{ //! \deprecated This macro will be removed in a future release @@ -54,6 +171,16 @@ extern "C" { //! \deprecated Use GList * instead typedef GList *GListPtr; +//! \deprecated Do not use +static inline const char * +crm_action_str(const char *task, guint interval_ms) { + if ((task != NULL) && (interval_ms == 0) + && (strcasecmp(task, PCMK_ACTION_MONITOR) == 0)) { + return "probe"; + } + return task; +} + #ifdef __cplusplus } #endif diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h index dfc2f25..0c5a40b 100644 --- a/include/crm/lrmd.h +++ b/include/crm/lrmd.h @@ -13,6 +13,7 @@ #include // bool #include // guint, GList #include +#include #include #ifdef __cplusplus @@ -203,74 +204,6 @@ enum lrmd_call_options { lrmd_opt_notify_changes_only = (1 << 2), }; -enum lrmd_callback_event { - lrmd_event_register, - lrmd_event_unregister, - lrmd_event_exec_complete, - lrmd_event_disconnect, - lrmd_event_connect, - lrmd_event_poke, - lrmd_event_new_client, -}; - -typedef struct lrmd_event_data_s { - /*! Type of event, register, unregister, call_completed... */ - enum lrmd_callback_event type; - - /*! The resource this event occurred on. */ - const char *rsc_id; - /*! The action performed, start, stop, monitor... */ - const char *op_type; - /*! The user data passed by caller of exec() API function */ - const char *user_data; - - /*! The client api call id associated with this event */ - int call_id; - /*! The operation's timeout period in ms. */ - int timeout; - /*! The operation's recurring interval in ms. */ - guint interval_ms; - /*! The operation's start delay value in ms. */ - int start_delay; - /*! This operation that just completed is on a deleted rsc. */ - int rsc_deleted; - - /*! The executed ra return code mapped to OCF */ - enum ocf_exitcode rc; - /*! The executor status returned for exec_complete events */ - int op_status; - /*! stdout from resource agent operation */ - const char *output; - /*! Timestamp of when op ran */ - unsigned int t_run; - /*! Timestamp of last rc change */ - unsigned int t_rcchange; - /*! Time in length op took to execute */ - unsigned int exec_time; - /*! Time in length spent in queue */ - unsigned int queue_time; - - /*! int connection result. 
Used for connection and poke events */ - int connection_rc; - - /* This is a GHashTable containing the - * parameters given to the operation */ - void *params; - - /*! client node name associated with this connection - * (used to match actions to the proper client when there are multiple) - */ - const char *remote_nodename; - - /*! exit failure reason string from resource agent operation */ - const char *exit_reason; -} lrmd_event_data_t; - -lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task, - guint interval_ms); -lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event); -void lrmd_free_event(lrmd_event_data_t * event); - typedef struct lrmd_rsc_info_s { char *id; char *type; diff --git a/include/crm/lrmd_events.h b/include/crm/lrmd_events.h new file mode 100644 index 0000000..3a1c500 --- /dev/null +++ b/include/crm/lrmd_events.h @@ -0,0 +1,108 @@ +/* + * Copyright 2012-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef PCMK__CRM_LRMD_EVENTS__H +# define PCMK__CRM_LRMD_EVENTS__H + +#include // guint +#include // enum ocf_exitcode + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \file + * \brief Resource agent executor events + * \ingroup lrmd + */ + +enum lrmd_callback_event { + lrmd_event_register, + lrmd_event_unregister, + lrmd_event_exec_complete, + lrmd_event_disconnect, + lrmd_event_connect, + lrmd_event_poke, + lrmd_event_new_client, +}; + +typedef struct lrmd_event_data_s { + /*! Type of event, register, unregister, call_completed... */ + enum lrmd_callback_event type; + + /*! The resource this event occurred on. */ + const char *rsc_id; + /*! The action performed, start, stop, monitor... */ + const char *op_type; + /*! The user data passed by caller of exec() API function */ + const char *user_data; + + /*! The client api call id associated with this event */ + int call_id; + + /*! The operation's timeout period in ms. */ + int timeout; + + /*! The operation's recurring interval in ms. */ + guint interval_ms; + + /*! The operation's start delay value in ms. */ + int start_delay; + + /*! This operation that just completed is on a deleted rsc. */ + int rsc_deleted; + + /*! The executed ra return code mapped to OCF */ + enum ocf_exitcode rc; + + /*! The executor status returned for exec_complete events */ + int op_status; + + /*! stdout from resource agent operation */ + const char *output; + + /*! Timestamp of when op ran */ + unsigned int t_run; + + /*! Timestamp of last rc change */ + unsigned int t_rcchange; + + /*! Time in length op took to execute */ + unsigned int exec_time; + + /*! Time in length spent in queue */ + unsigned int queue_time; + + /*! int connection result. Used for connection and poke events */ + int connection_rc; + + /* This is a GHashTable containing the + * parameters given to the operation */ + void *params; + + /*! client node name associated with this connection + * (used to match actions to the proper client when there are multiple) + */ + const char *remote_nodename; + + /*! 
exit failure reason string from resource agent operation */ + const char *exit_reason; +} lrmd_event_data_t; + +lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task, + guint interval_ms); +lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t *event); +void lrmd_free_event(lrmd_event_data_t *event); + +#ifdef __cplusplus +} +#endif + +#endif // PCMK__CRM_LRMD_EVENTS__H diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h index 5810554..d1cd25d 100644 --- a/include/crm/lrmd_internal.h +++ b/include/crm/lrmd_internal.h @@ -47,6 +47,7 @@ void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, void lrmd__reset_result(lrmd_event_data_t *event); time_t lrmd__uptime(lrmd_t *lrmd); +const char *lrmd__node_start_state(lrmd_t *lrmd); /* Shared functions for IPC proxy back end */ diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h index 2e50adb..c616182 100644 --- a/include/crm/msg_xml.h +++ b/include/crm/msg_xml.h @@ -48,6 +48,8 @@ extern "C" { * XML attributes */ +#define PCMK_XA_FORMAT "format" + /* These have been deprecated as CIB element attributes (aliases for * "promoted-max" and "promoted-node-max") since 2.0.0. */ @@ -59,7 +61,14 @@ extern "C" { * Meta attributes */ +#define PCMK_META_CLONE_MAX "clone-max" +#define PCMK_META_CLONE_MIN "clone-min" +#define PCMK_META_CLONE_NODE_MAX "clone-node-max" #define PCMK_META_ENABLED "enabled" +#define PCMK_META_FAILURE_TIMEOUT "failure-timeout" +#define PCMK_META_MIGRATION_THRESHOLD "migration-threshold" +#define PCMK_META_PROMOTED_MAX "promoted-max" +#define PCMK_META_PROMOTED_NODE_MAX "promoted-node-max" /* @@ -149,7 +158,6 @@ extern "C" { # define XML_ATTR_IDREF "id-ref" # define XML_ATTR_ID_LONG "long-id" # define XML_ATTR_TYPE "type" -# define XML_ATTR_VERBOSE "verbose" # define XML_ATTR_OP "op" # define XML_ATTR_DC_UUID "dc-uuid" # define XML_ATTR_UPDATE_ORIG "update-origin" @@ -183,8 +191,6 @@ extern "C" { # define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete" # define XML_PING_ATTR_PACEMAKERDSTATE_REMOTE "remote" -# define XML_TAG_FRAGMENT "cib_fragment" - # define XML_FAIL_TAG_CIB "failed_update" # define XML_FAILCIB_ATTR_ID "id" @@ -198,7 +204,6 @@ extern "C" { # define XML_CIB_TAG_STATUS "status" # define XML_CIB_TAG_RESOURCES "resources" # define XML_CIB_TAG_NODES "nodes" -# define XML_CIB_TAG_DOMAINS "domains" # define XML_CIB_TAG_CONSTRAINTS "constraints" # define XML_CIB_TAG_CRMCONFIG "crm_config" # define XML_CIB_TAG_OPCONFIG "op_defaults" @@ -239,19 +244,12 @@ extern "C" { # define XML_RSC_ATTR_ORDERED "ordered" # define XML_RSC_ATTR_INTERLEAVE "interleave" # define XML_RSC_ATTR_INCARNATION "clone" -# define XML_RSC_ATTR_INCARNATION_MAX "clone-max" -# define XML_RSC_ATTR_INCARNATION_MIN "clone-min" -# define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max" # define XML_RSC_ATTR_PROMOTABLE "promotable" -# define XML_RSC_ATTR_PROMOTED_MAX "promoted-max" -# define XML_RSC_ATTR_PROMOTED_NODEMAX "promoted-node-max" # define XML_RSC_ATTR_MANAGED "is-managed" # define XML_RSC_ATTR_TARGET_ROLE "target-role" # define XML_RSC_ATTR_UNIQUE "globally-unique" # define XML_RSC_ATTR_NOTIFY "notify" # define XML_RSC_ATTR_STICKINESS "resource-stickiness" -# define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold" -# define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout" # define XML_RSC_ATTR_MULTIPLE "multiple-active" # define XML_RSC_ATTR_REQUIRES "requires" # define XML_RSC_ATTR_CONTAINER "container" @@ -285,15 +283,8 @@ extern "C" { //! 
\deprecated Do not use (will be removed in a future release) # define XML_CIB_ATTR_REPLACE "replace" -# define XML_CIB_ATTR_SOURCE "source" - # define XML_CIB_ATTR_PRIORITY "priority" -# define XML_CIB_ATTR_SOURCE "source" -# define XML_NODE_JOIN_STATE "join" -# define XML_NODE_EXPECTED "expected" -# define XML_NODE_IN_CLUSTER "in_ccm" -# define XML_NODE_IS_PEER "crmd" # define XML_NODE_IS_REMOTE "remote_node" # define XML_NODE_IS_FENCED "node_fenced" # define XML_NODE_IS_MAINTENANCE "node_in_maintenance" @@ -333,7 +324,6 @@ extern "C" { # define XML_LRM_ATTR_EXIT_REASON "exit-reason" # define XML_RSC_OP_LAST_CHANGE "last-rc-change" -# define XML_RSC_OP_LAST_RUN "last-run" // deprecated since 2.0.3 # define XML_RSC_OP_T_EXEC "exec-time" # define XML_RSC_OP_T_QUEUE "queue-time" @@ -413,6 +403,7 @@ extern "C" { # define XML_CONFIG_ATTR_SHUTDOWN_LOCK "shutdown-lock" # define XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit" # define XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY "priority-fencing-delay" +# define XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT "node-pending-timeout" # define XML_ALERT_ATTR_PATH "path" # define XML_ALERT_ATTR_TIMEOUT "timeout" @@ -428,7 +419,10 @@ extern "C" { # define XML_ATTR_TE_TARGET_RC "op_target_rc" # define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes" +//! \deprecated Do not use (will be removed in a future release) # define XML_TAG_DIFF_ADDED "diff-added" + +//! \deprecated Do not use (will be removed in a future release) # define XML_TAG_DIFF_REMOVED "diff-removed" # define XML_ACL_TAG_USER "acl_target" @@ -478,7 +472,6 @@ extern "C" { # define XML_DIFF_POSITION "position" # define ID(x) crm_element_value(x, XML_ATTR_ID) -# define TYPE(x) crm_element_name(x) #ifdef __cplusplus } diff --git a/include/crm/msg_xml_compat.h b/include/crm/msg_xml_compat.h index aad98e8..612eebf 100644 --- a/include/crm/msg_xml_compat.h +++ b/include/crm/msg_xml_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -25,6 +25,21 @@ extern "C" { * release. */ +//! \deprecated Use PCMK_META_CLONE_MAX instead +#define XML_RSC_ATTR_INCARNATION_MAX PCMK_META_CLONE_MAX + +//! \deprecated Use PCMK_META_CLONE_MIN instead +#define XML_RSC_ATTR_INCARNATION_MIN PCMK_META_CLONE_MIN + +//! \deprecated Use PCMK_META_CLONE_NODE_MAX instead +#define XML_RSC_ATTR_INCARNATION_NODEMAX PCMK_META_CLONE_NODE_MAX + +//! \deprecated Use PCMK_META_PROMOTED_MAX instead +#define XML_RSC_ATTR_PROMOTED_MAX PCMK_META_PROMOTED_MAX + +//! \deprecated Use PCMK_META_PROMOTED_NODE_MAX instead +#define XML_RSC_ATTR_PROMOTED_NODEMAX PCMK_META_PROMOTED_NODE_MAX + //! \deprecated Use PCMK_STONITH_PROVIDES instead #define XML_RSC_ATTR_PROVIDES PCMK_STONITH_PROVIDES @@ -40,12 +55,21 @@ extern "C" { //! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead #define PCMK_XE_PROMOTED_NODE_MAX_LEGACY PCMK_XA_PROMOTED_NODE_MAX_LEGACY +//! \deprecated Use PCMK_META_MIGRATION_THRESHOLD instead +#define XML_RSC_ATTR_FAIL_STICKINESS PCMK_META_MIGRATION_THRESHOLD + +//! \deprecated Use PCMK_META_FAILURE_TIMEOUT instead +#define XML_RSC_ATTR_FAIL_TIMEOUT PCMK_META_FAILURE_TIMEOUT + //! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead #define XML_RSC_ATTR_MASTER_NODEMAX PCMK_XA_PROMOTED_NODE_MAX_LEGACY //! \deprecated Do not use (will be removed in a future release) #define XML_ATTR_RA_VERSION "ra-version" +//! 
\deprecated Do not use (will be removed in a future release) +#define XML_TAG_FRAGMENT "cib_fragment" + //! \deprecated Do not use (will be removed in a future release) #define XML_TAG_RSC_VER_ATTRS "rsc_versioned_attrs" @@ -58,6 +82,33 @@ extern "C" { //! \deprecated Use \p XML_ATTR_ID instead #define XML_ATTR_UUID "id" +//! \deprecated Do not use (will be removed in a future release) +#define XML_ATTR_VERBOSE "verbose" + +//! \deprecated Do not use (will be removed in a future release) +#define XML_CIB_TAG_DOMAINS "domains" + +//! \deprecated Do not use (will be removed in a future release) +#define XML_CIB_ATTR_SOURCE "source" + +//! \deprecated Do not use +#define XML_NODE_EXPECTED "expected" + +//! \deprecated Do not use +#define XML_NODE_IN_CLUSTER "in_ccm" + +//! \deprecated Do not use +#define XML_NODE_IS_PEER "crmd" + +//! \deprecated Do not use +#define XML_NODE_JOIN_STATE "join" + +//! \deprecated Do not use (will be removed in a future release) +#define XML_RSC_OP_LAST_RUN "last-run" + +//! \deprecated Use name member directly +#define TYPE(x) (((x) == NULL)? NULL : (const char *) ((x)->name)) + #ifdef __cplusplus } #endif diff --git a/include/crm/pengine/Makefile.am b/include/crm/pengine/Makefile.am index fac6031..3560d24 100644 --- a/include/crm/pengine/Makefile.am +++ b/include/crm/pengine/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2006-2021 the Pacemaker project contributors +# Copyright 2006-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -10,8 +10,13 @@ MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm/pengine -noinst_HEADERS = internal.h remote_internal.h rules_internal.h -header_HEADERS = common.h complex.h pe_types.h rules.h status.h \ +noinst_HEADERS = internal.h \ + $(wildcard *_internal.h) +header_HEADERS = common.h \ + complex.h \ + pe_types.h \ + rules.h \ + status.h \ common_compat.h \ pe_types_compat.h \ rules_compat.h diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h index 9fe05bd..2feac8a 100644 --- a/include/crm/pengine/common.h +++ b/include/crm/pengine/common.h @@ -13,6 +13,7 @@ # include # include # include +# include #ifdef __cplusplus extern "C" { @@ -21,120 +22,6 @@ extern "C" { extern gboolean was_processing_error; extern gboolean was_processing_warning; -/* The order is (partially) significant here; the values from action_fail_ignore - * through action_fail_fence are in order of increasing severity. - * - * @COMPAT The values should be ordered and numbered per the "TODO" comments - * below, so all values are in order of severity and there is room for - * future additions, but that would break API compatibility. - * @TODO For now, we just use a function to compare the values specially, but - * at the next compatibility break, we should arrange things properly. - */ -enum action_fail_response { - action_fail_ignore, // @TODO = 10 - // @TODO action_fail_demote = 20, - action_fail_recover, // @TODO = 30 - // @TODO action_fail_reset_remote = 40, - // @TODO action_fail_restart_container = 50, - action_fail_migrate, // @TODO = 60 - action_fail_block, // @TODO = 70 - action_fail_stop, // @TODO = 80 - action_fail_standby, // @TODO = 90 - action_fail_fence, // @TODO = 100 - - // @COMPAT Values below here are out of order for API compatibility - - action_fail_restart_container, - - /* This is reserved for internal use for remote node connection resources. 
- * Fence the remote node if stonith is enabled, otherwise attempt to recover - * the connection resource. This allows us to specify types of connection - * resource failures that should result in fencing the remote node - * (for example, recurring monitor failures). - */ - action_fail_reset_remote, - - action_fail_demote, -}; - -/* the "done" action must be the "pre" action +1 */ -enum action_tasks { - no_action, - monitor_rsc, - stop_rsc, - stopped_rsc, - start_rsc, - started_rsc, - action_notify, - action_notified, - action_promote, - action_promoted, - action_demote, - action_demoted, - shutdown_crm, - stonith_node -}; - -enum rsc_recovery_type { - recovery_stop_start, - recovery_stop_only, - recovery_block, - recovery_stop_unexpected, -}; - -enum rsc_start_requirement { - rsc_req_nothing, /* Allowed by custom_action() */ - rsc_req_quorum, /* Enforced by custom_action() */ - rsc_req_stonith /* Enforced by native_start_constraints() */ -}; - -//! Possible roles that a resource can be in -enum rsc_role_e { - RSC_ROLE_UNKNOWN = 0, - RSC_ROLE_STOPPED = 1, - RSC_ROLE_STARTED = 2, - RSC_ROLE_UNPROMOTED = 3, - RSC_ROLE_PROMOTED = 4, - -#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) - //! \deprecated Use RSC_ROLE_UNPROMOTED instead - RSC_ROLE_SLAVE = RSC_ROLE_UNPROMOTED, - - //! \deprecated Use RSC_ROLE_PROMOTED instead - RSC_ROLE_MASTER = RSC_ROLE_PROMOTED, -#endif -}; - -# define RSC_ROLE_MAX (RSC_ROLE_PROMOTED + 1) - -# define RSC_ROLE_UNKNOWN_S "Unknown" -# define RSC_ROLE_STOPPED_S "Stopped" -# define RSC_ROLE_STARTED_S "Started" -# define RSC_ROLE_UNPROMOTED_S "Unpromoted" -# define RSC_ROLE_PROMOTED_S "Promoted" -# define RSC_ROLE_UNPROMOTED_LEGACY_S "Slave" -# define RSC_ROLE_PROMOTED_LEGACY_S "Master" - -//! Deprecated -enum pe_print_options { - pe_print_log = (1 << 0), - pe_print_html = (1 << 1), - pe_print_ncurses = (1 << 2), - pe_print_printf = (1 << 3), - pe_print_dev = (1 << 4), //! Ignored - pe_print_details = (1 << 5), //! Ignored - pe_print_max_details = (1 << 6), //! Ignored - pe_print_rsconly = (1 << 7), - pe_print_ops = (1 << 8), - pe_print_suppres_nl = (1 << 9), - pe_print_xml = (1 << 10), - pe_print_brief = (1 << 11), - pe_print_pending = (1 << 12), - pe_print_clone_details = (1 << 13), - pe_print_clone_active = (1 << 14), // Print clone instances only if active - pe_print_implicit = (1 << 15) // Print implicitly created resources -}; - const char *task2text(enum action_tasks task); enum action_tasks text2task(const char *task); enum rsc_role_e text2role(const char *role); @@ -154,13 +41,13 @@ static inline const char * recovery2text(enum rsc_recovery_type type) { switch (type) { - case recovery_stop_only: + case pcmk_multiply_active_stop: return "shutting it down"; - case recovery_stop_start: + case pcmk_multiply_active_restart: return "attempting recovery"; - case recovery_block: + case pcmk_multiply_active_block: return "waiting for an administrator"; - case recovery_stop_unexpected: + case pcmk_multiply_active_unexpected: return "stopping unexpected instances"; } return "Unknown"; diff --git a/include/crm/pengine/common_compat.h b/include/crm/pengine/common_compat.h index 773bb3d..4330ccf 100644 --- a/include/crm/pengine/common_compat.h +++ b/include/crm/pengine/common_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -10,6 +10,8 @@ #ifndef PCMK__CRM_PENGINE_COMMON_COMPAT__H # define PCMK__CRM_PENGINE_COMMON_COMPAT__H +#include + #ifdef __cplusplus extern "C" { #endif @@ -23,12 +25,35 @@ extern "C" { * release. */ -//! \deprecated Use RSC_ROLE_UNPROMOTED_LEGACY_S instead -# define RSC_ROLE_SLAVE_S RSC_ROLE_UNPROMOTED_LEGACY_S +//! \deprecated Use (pcmk_role_promoted + 1) instead +#define RSC_ROLE_MAX (pcmk_role_promoted + 1) + +//! \deprecated Use role2text(pcmk_role_unknown) instead +#define RSC_ROLE_UNKNOWN_S role2text(pcmk_role_unknown) + +//! \deprecated Use role2text(pcmk_role_stopped) instead +#define RSC_ROLE_STOPPED_S role2text(pcmk_role_stopped) + +//! \deprecated Use role2text(pcmk_role_started) instead +#define RSC_ROLE_STARTED_S role2text(pcmk_role_started) + +//! \deprecated Use role2text(pcmk_role_unpromoted) instead +#define RSC_ROLE_UNPROMOTED_S role2text(pcmk_role_unpromoted) + +//! \deprecated Use role2text(pcmk_role_promoted) instead +#define RSC_ROLE_PROMOTED_S role2text(pcmk_role_promoted) + +//! \deprecated Do not use +#define RSC_ROLE_UNPROMOTED_LEGACY_S "Slave" + +//! \deprecated Do not use +#define RSC_ROLE_SLAVE_S RSC_ROLE_UNPROMOTED_LEGACY_S -//! \deprecated Use RSC_ROLE_PROMOTED_LEGACY_S instead -# define RSC_ROLE_MASTER_S RSC_ROLE_PROMOTED_LEGACY_S +//! \deprecated Do not use +#define RSC_ROLE_PROMOTED_LEGACY_S "Master" +//! \deprecated Do not use +#define RSC_ROLE_MASTER_S RSC_ROLE_PROMOTED_LEGACY_S #ifdef __cplusplus } diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h index 929e4da..9b6ad1b 100644 --- a/include/crm/pengine/complex.h +++ b/include/crm/pengine/complex.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -12,23 +12,23 @@ #include // gboolean, GHashTable #include // xmlNode -#include // pe_node_t, pe_resource_t, etc. +#include // pcmk_node_t, pcmk_resource_t, etc. 
#ifdef __cplusplus extern "C" { #endif -extern resource_object_functions_t resource_class_functions[]; +extern pcmk_rsc_methods_t resource_class_functions[]; -GHashTable *pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node, - pe_working_set_t *data_set); -void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc, - pe_node_t *node, pe_working_set_t *data_set); -void get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc, - const pe_node_t *node, pe_working_set_t *data_set); +GHashTable *pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node, + pcmk_scheduler_t *scheduler); +void get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t *rsc, + pcmk_node_t *node, pcmk_scheduler_t *scheduler); +void get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc, + const pcmk_node_t *node, pcmk_scheduler_t *scheduler); -gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc); -pe_resource_t *uber_parent(pe_resource_t *rsc); +gboolean is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc); +pcmk_resource_t *uber_parent(pcmk_resource_t *rsc); #ifdef __cplusplus } diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 1b5f6f1..9c8068f 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -10,6 +10,7 @@ #ifndef PE_INTERNAL__H # define PE_INTERNAL__H +# include # include # include # include @@ -18,31 +19,17 @@ # include # include # include +# include -const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts); +const char *pe__resource_description(const pcmk_resource_t *rsc, + uint32_t show_opts); -enum pe__clone_flags { - // Whether instances should be started sequentially - pe__clone_ordered = (1 << 0), +bool pe__clone_is_ordered(const pcmk_resource_t *clone); +int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag); +bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags); - // Whether promotion scores have been added - pe__clone_promotion_added = (1 << 1), - - // Whether promotion constraints have been added - pe__clone_promotion_constrained = (1 << 2), -}; - -bool pe__clone_is_ordered(const pe_resource_t *clone); -int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag); - - -enum pe__group_flags { - pe__group_ordered = (1 << 0), // Members start sequentially - pe__group_colocated = (1 << 1), // Members must be on same node -}; - -bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags); -pe_resource_t *pe__last_group_member(const pe_resource_t *group); +bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags); +pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group); # define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args) @@ -62,16 +49,16 @@ pe_resource_t *pe__last_group_member(const pe_resource_t *group); # define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); } # define pe_proc_warn(fmt...) 
{ was_processing_warning = TRUE; crm_warn(fmt); } -#define pe__set_working_set_flags(working_set, flags_to_set) do { \ - (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \ - LOG_TRACE, "Working set", crm_system_name, \ - (working_set)->flags, (flags_to_set), #flags_to_set); \ +#define pe__set_working_set_flags(scheduler, flags_to_set) do { \ + (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Scheduler", crm_system_name, \ + (scheduler)->flags, (flags_to_set), #flags_to_set); \ } while (0) -#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \ - (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ - LOG_TRACE, "Working set", crm_system_name, \ - (working_set)->flags, (flags_to_clear), #flags_to_clear); \ +#define pe__clear_working_set_flags(scheduler, flags_to_clear) do { \ + (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Scheduler", crm_system_name, \ + (scheduler)->flags, (flags_to_clear), #flags_to_clear); \ } while (0) #define pe__set_resource_flags(resource, flags_to_set) do { \ @@ -152,144 +139,127 @@ pe_resource_t *pe__last_group_member(const pe_resource_t *group); #flags_to_clear); \ } while (0) -// Some warnings we don't want to print every transition - -enum pe_warn_once_e { - pe_wo_blind = (1 << 0), - pe_wo_restart_type = (1 << 1), - pe_wo_role_after = (1 << 2), - pe_wo_poweroff = (1 << 3), - pe_wo_require_all = (1 << 4), - pe_wo_order_score = (1 << 5), - pe_wo_neg_threshold = (1 << 6), - pe_wo_remove_after = (1 << 7), - pe_wo_ping_node = (1 << 8), - pe_wo_order_inst = (1 << 9), - pe_wo_coloc_inst = (1 << 10), - pe_wo_group_order = (1 << 11), - pe_wo_group_coloc = (1 << 12), - pe_wo_upstart = (1 << 13), - pe_wo_nagios = (1 << 14), -}; - -extern uint32_t pe_wo; - #define pe_warn_once(pe_wo_bit, fmt...) 
do { \ - if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \ - if (pe_wo_bit == pe_wo_blind) { \ + if (!pcmk_is_set(pcmk__warnings, pe_wo_bit)) { \ + if (pe_wo_bit == pcmk__wo_blind) { \ crm_warn(fmt); \ } else { \ pe_warn(fmt); \ } \ - pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ - "Warn-once", "logging", pe_wo, \ - (pe_wo_bit), #pe_wo_bit); \ - } \ + pcmk__warnings = pcmk__set_flags_as(__func__, __LINE__, \ + LOG_TRACE, \ + "Warn-once", "logging", \ + pcmk__warnings, \ + (pe_wo_bit), #pe_wo_bit); \ + } \ } while (0); typedef struct pe__location_constraint_s { char *id; // Constraint XML ID - pe_resource_t *rsc_lh; // Resource being located + pcmk_resource_t *rsc_lh; // Resource being located enum rsc_role_e role_filter; // Role to locate enum pe_discover_e discover_mode; // Resource discovery - GList *node_list_rh; // List of pe_node_t* + GList *node_list_rh; // List of pcmk_node_t* } pe__location_t; typedef struct pe__order_constraint_s { int id; - uint32_t flags; // Group of enum pe_ordering flags + uint32_t flags; // Group of enum pcmk__action_relation_flags void *lh_opaque; - pe_resource_t *lh_rsc; - pe_action_t *lh_action; + pcmk_resource_t *lh_rsc; + pcmk_action_t *lh_action; char *lh_action_task; void *rh_opaque; - pe_resource_t *rh_rsc; - pe_action_t *rh_action; + pcmk_resource_t *rh_rsc; + pcmk_action_t *rh_action; char *rh_action_task; } pe__ordering_t; -const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc, - bool include_bundle); +const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc, + bool include_bundle); -int pe__clone_max(const pe_resource_t *clone); -int pe__clone_node_max(const pe_resource_t *clone); -int pe__clone_promoted_max(const pe_resource_t *clone); -int pe__clone_promoted_node_max(const pe_resource_t *clone); -void pe__create_clone_notifications(pe_resource_t *clone); -void pe__free_clone_notification_data(pe_resource_t *clone); -void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone, - pe_action_t *start, pe_action_t *started, - pe_action_t *stop, pe_action_t *stopped); +int pe__clone_max(const pcmk_resource_t *clone); +int pe__clone_node_max(const pcmk_resource_t *clone); +int pe__clone_promoted_max(const pcmk_resource_t *clone); +int pe__clone_promoted_node_max(const pcmk_resource_t *clone); +void pe__create_clone_notifications(pcmk_resource_t *clone); +void pe__free_clone_notification_data(pcmk_resource_t *clone); +void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone, + pcmk_action_t *start, + pcmk_action_t *started, + pcmk_action_t *stop, + pcmk_action_t *stopped); +pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, + bool optional, bool runnable); -pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, - bool optional, bool runnable); +void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, + bool any_promoting, bool any_demoting); -void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting, - bool any_demoting); - -bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node); +bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node); void add_hash_param(GHashTable * hash, const char *name, const char *value); -char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, - pe_working_set_t * data_set); -pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current); +char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean 
create, + const char *name, pcmk_scheduler_t *scheduler); +pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list, + int current); void pe_metadata(pcmk__output_t *out); void verify_pe_options(GHashTable * options); -void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed); +void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node, + pcmk_scheduler_t *scheduler, gboolean failed); -gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); -gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); -gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); -gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set); +gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); +gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); +gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); +gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); -pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node, - int flags); +pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id, + const pcmk_node_t *node, int flags); -gboolean native_active(pe_resource_t * rsc, gboolean all); -gboolean group_active(pe_resource_t * rsc, gboolean all); -gboolean clone_active(pe_resource_t * rsc, gboolean all); -gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all); +gboolean native_active(pcmk_resource_t *rsc, gboolean all); +gboolean group_active(pcmk_resource_t *rsc, gboolean all); +gboolean clone_active(pcmk_resource_t *rsc, gboolean all); +gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all); //! \deprecated This function will be removed in a future release -void native_print(pe_resource_t *rsc, const char *pre_text, long options, +void native_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! \deprecated This function will be removed in a future release -void group_print(pe_resource_t *rsc, const char *pre_text, long options, +void group_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! \deprecated This function will be removed in a future release -void clone_print(pe_resource_t *rsc, const char *pre_text, long options, +void clone_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! 
\deprecated This function will be removed in a future release -void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, +void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); -gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name, - const pe_node_t *node, uint32_t show_opts, +gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name, + const pcmk_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes); int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name , size_t pairs_count, ...); -char *pe__node_display_name(pe_node_t *node, bool print_detail); +char *pe__node_display_name(pcmk_node_t *node, bool print_detail); // Clone notifications (pe_notif.c) -void pe__order_notifs_after_fencing(const pe_action_t *action, - pe_resource_t *rsc, - pe_action_t *stonith_op); +void pe__order_notifs_after_fencing(const pcmk_action_t *action, + pcmk_resource_t *rsc, + pcmk_action_t *stonith_op); static inline const char * -pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag) +pe__rsc_bool_str(const pcmk_resource_t *rsc, uint64_t rsc_flag) { return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag)); } @@ -308,167 +278,156 @@ int pe__resource_xml(pcmk__output_t *out, va_list args); int pe__resource_html(pcmk__output_t *out, va_list args); int pe__resource_text(pcmk__output_t *out, va_list args); -void native_free(pe_resource_t * rsc); -void group_free(pe_resource_t * rsc); -void clone_free(pe_resource_t * rsc); -void pe__free_bundle(pe_resource_t *rsc); - -enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current); -enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current); -enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current); -enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc, +void native_free(pcmk_resource_t *rsc); +void group_free(pcmk_resource_t *rsc); +void clone_free(pcmk_resource_t *rsc); +void pe__free_bundle(pcmk_resource_t *rsc); + +enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc, + gboolean current); +enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc, + gboolean current); +enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc, + gboolean current); +enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current); -void pe__count_common(pe_resource_t *rsc); -void pe__count_bundle(pe_resource_t *rsc); +void pe__count_common(pcmk_resource_t *rsc); +void pe__count_bundle(pcmk_resource_t *rsc); -void common_free(pe_resource_t * rsc); +void common_free(pcmk_resource_t *rsc); -pe_node_t *pe__copy_node(const pe_node_t *this_node); -extern time_t get_effective_time(pe_working_set_t * data_set); +pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node); +time_t get_effective_time(pcmk_scheduler_t *scheduler); /* Failure handling utilities (from failcounts.c) */ -// bit flags for fail count handling options -enum pe_fc_flags_e { - pe_fc_default = (1 << 0), - pe_fc_effective = (1 << 1), // don't count expired failures - pe_fc_fillers = (1 << 2), // if container, include filler failures in count -}; - -int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc, +int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc, time_t *last_failure, uint32_t flags, const xmlNode *xml_op); -pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node, - const char *reason, 
- pe_working_set_t *data_set); +pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc, + const pcmk_node_t *node, const char *reason, + pcmk_scheduler_t *scheduler); /* Functions for finding/counting a resource's active nodes */ -bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node, - pe_node_t **active, unsigned int *count_all, +bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node, + pcmk_node_t **active, unsigned int *count_all, unsigned int *count_clean); -pe_node_t *pe__find_active_requires(const pe_resource_t *rsc, +pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count); -static inline pe_node_t * -pe__current_node(const pe_resource_t *rsc) +static inline pcmk_node_t * +pe__current_node(const pcmk_resource_t *rsc) { return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL); } /* Binary like operators for lists of nodes */ -extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores); - GHashTable *pe__node_list2table(const GList *list); -static inline gpointer -pe_hash_table_lookup(GHashTable * hash, gconstpointer key) -{ - if (hash) { - return g_hash_table_lookup(hash, key); - } - return NULL; -} - -extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set); -extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order); - -void pe__show_node_weights_as(const char *file, const char *function, - int line, bool to_log, const pe_resource_t *rsc, - const char *comment, GHashTable *nodes, - pe_working_set_t *data_set); - -#define pe__show_node_weights(level, rsc, text, nodes, data_set) \ - pe__show_node_weights_as(__FILE__, __func__, __LINE__, \ - (level), (rsc), (text), (nodes), (data_set)) - -xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key); - -pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task, - const pe_node_t *on_node, gboolean optional, - gboolean foo, pe_working_set_t *data_set); - -# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0) +pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler); +gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action, + uint32_t flags); + +void pe__show_node_scores_as(const char *file, const char *function, + int line, bool to_log, const pcmk_resource_t *rsc, + const char *comment, GHashTable *nodes, + pcmk_scheduler_t *scheduler); + +#define pe__show_node_scores(level, rsc, text, nodes, scheduler) \ + pe__show_node_scores_as(__FILE__, __func__, __LINE__, \ + (level), (rsc), (text), (nodes), (scheduler)) + +GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc, + const pcmk_node_t *node, + const char *action_name, guint interval_ms, + const xmlNode *action_config); +GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml, + GHashTable *node_attrs, + pcmk_scheduler_t *data_set); +xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc, + const char *action_name, guint interval_ms, + bool include_disabled); + +enum rsc_start_requirement pcmk__action_requires(const pcmk_resource_t *rsc, + const char *action_name); + +enum action_fail_response pcmk__parse_on_fail(const pcmk_resource_t *rsc, + const char *action_name, + guint interval_ms, + const char *value); + +enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc, + const char *action_name, + enum action_fail_response on_fail, + GHashTable *meta); + +pcmk_action_t *custom_action(pcmk_resource_t *rsc, 
char *key, const char *task, + const pcmk_node_t *on_node, gboolean optional, + pcmk_scheduler_t *scheduler); + +# define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0) # define delete_action(rsc, node, optional) custom_action( \ - rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \ - optional, TRUE, rsc->cluster); - -# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0) -# define stopped_action(rsc, node, optional) custom_action( \ - rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \ - optional, TRUE, rsc->cluster); + rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \ + optional, rsc->cluster); -# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0) +# define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0) # define stop_action(rsc, node, optional) custom_action( \ - rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \ - optional, TRUE, rsc->cluster); + rsc, stop_key(rsc), PCMK_ACTION_STOP, node, \ + optional, rsc->cluster); -# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0) -# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0) +# define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0) +# define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0) # define start_action(rsc, node, optional) custom_action( \ - rsc, start_key(rsc), CRMD_ACTION_START, node, \ - optional, TRUE, rsc->cluster) + rsc, start_key(rsc), PCMK_ACTION_START, node, \ + optional, rsc->cluster) -# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0) -# define started_action(rsc, node, optional) custom_action( \ - rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \ - optional, TRUE, rsc->cluster) - -# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0) +# define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0) # define promote_action(rsc, node, optional) custom_action( \ - rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \ - optional, TRUE, rsc->cluster) - -# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0) -# define promoted_action(rsc, node, optional) custom_action( \ - rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \ - optional, TRUE, rsc->cluster) + rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node, \ + optional, rsc->cluster) -# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0) +# define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0) # define demote_action(rsc, node, optional) custom_action( \ - rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \ - optional, TRUE, rsc->cluster) + rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \ + optional, rsc->cluster) -# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0) -# define demoted_action(rsc, node, optional) custom_action( \ - rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \ - optional, TRUE, rsc->cluster) +extern int pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action, + pcmk_scheduler_t *scheduler); -extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action, - pe_working_set_t *data_set); +pcmk_action_t *find_first_action(const GList *input, const char *uuid, + const char *task, const pcmk_node_t *on_node); -pe_action_t *find_first_action(const GList *input, const char *uuid, - const char *task, const pe_node_t *on_node); +enum action_tasks get_complex_task(const pcmk_resource_t *rsc, + const char *name); -enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name); - -extern GList 
*find_actions(GList *input, const char *key, const pe_node_t *on_node); +GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node); GList *find_actions_exact(GList *input, const char *key, - const pe_node_t *on_node); -GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, + const pcmk_node_t *on_node); +GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node, const char *task, bool require_node); -extern void pe_free_action(pe_action_t * action); +extern void pe_free_action(pcmk_action_t *action); -void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score, - const char *tag, pe_working_set_t *data_set); +void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, + const char *tag, pcmk_scheduler_t *scheduler); extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, bool same_node_default); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); -gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role); -void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, +gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role); +void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why); -pe_resource_t *find_clone_instance(const pe_resource_t *rsc, - const char *sub_id); +pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc, + const char *sub_id); extern void destroy_ticket(gpointer data); -extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set); +pcmk_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler); // Resources for manipulating resource names const char *pe_base_name_end(const char *id); @@ -476,7 +435,7 @@ char *clone_strip(const char *last_rsc_id); char *clone_zero(const char *last_rsc_id); static inline bool -pe_base_name_eq(const pe_resource_t *rsc, const char *id) +pe_base_name_eq(const pcmk_resource_t *rsc, const char *id) { if (id && rsc && rsc->id) { // Number of characters in rsc->id before any clone suffix @@ -490,22 +449,10 @@ pe_base_name_eq(const pe_resource_t *rsc, const char *id) int pe__target_rc_from_xml(const xmlNode *xml_op); gint pe__cmp_node_name(gconstpointer a, gconstpointer b); -bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any); - -enum rsc_digest_cmp_val { - /*! Digests are the same */ - RSC_DIGEST_MATCH = 0, - /*! Params that require a restart changed */ - RSC_DIGEST_RESTART, - /*! Some parameter changed. */ - RSC_DIGEST_ALL, - /*! rsc op didn't have a digest associated with it, so - * it is unknown if parameters changed or not. 
*/ - RSC_DIGEST_UNKNOWN, -}; +bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any); typedef struct op_digest_cache_s { - enum rsc_digest_cmp_val rc; + enum pcmk__digest_result rc; xmlNode *params_all; xmlNode *params_secure; xmlNode *params_restart; @@ -514,35 +461,37 @@ typedef struct op_digest_cache_s { char *digest_restart_calc; } op_digest_cache_t; -op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, +op_digest_cache_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task, guint *interval_ms, - const pe_node_t *node, + const pcmk_node_t *node, const xmlNode *xml_op, GHashTable *overrides, bool calc_secure, - pe_working_set_t *data_set); + pcmk_scheduler_t *scheduler); void pe__free_digests(gpointer ptr); -op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc, +op_digest_cache_t *rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op, - pe_node_t *node, - pe_working_set_t *data_set); - -pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional, - const char *reason, bool priority_delay, - pe_working_set_t *data_set); -void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, - const char *reason, pe_action_t *dependency, - pe_working_set_t *data_set); - -char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag); -void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite); -void pe__add_action_expected_result(pe_action_t *action, int expected_result); - -void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); -void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); -void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag); + pcmk_node_t *node, + pcmk_scheduler_t *scheduler); + +pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional, + const char *reason, bool priority_delay, + pcmk_scheduler_t *scheduler); +void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, + const char *reason, pcmk_action_t *dependency, + pcmk_scheduler_t *scheduler); + +char *pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag); +void pe_action_set_reason(pcmk_action_t *action, const char *reason, + bool overwrite); +void pe__add_action_expected_result(pcmk_action_t *action, int expected_result); + +void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); +void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); +void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, + uint64_t flag); gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); @@ -550,49 +499,75 @@ gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj void print_rscs_brief(GList *rsc_list, const char * pre_text, long options, void * print_data, gboolean print_all); int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options); -void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay); +void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, + const char *reason, bool priority_delay); -pe_node_t *pe_create_node(const char *id, const char *uname, const char *type, - const char *score, pe_working_set_t * data_set); +pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type, + const char *score, pcmk_scheduler_t *scheduler); //! 
\deprecated This function will be removed in a future release -void common_print(pe_resource_t *rsc, const char *pre_text, const char *name, - const pe_node_t *node, long options, void *print_data); -int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc, - const char *name, const pe_node_t *node, +void common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name, + const pcmk_node_t *node, long options, void *print_data); +int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc, + const char *name, const pcmk_node_t *node, unsigned int options); -int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc, - const char *name, const pe_node_t *node, +int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc, + const char *name, const pcmk_node_t *node, unsigned int options); -GList *pe__bundle_containers(const pe_resource_t *bundle); - -int pe__bundle_max(const pe_resource_t *rsc); -int pe__bundle_max_per_node(const pe_resource_t *rsc); - -pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle, - const pe_node_t *node); -bool pe__bundle_needs_remote_name(pe_resource_t *rsc); -const char *pe__add_bundle_remote_name(pe_resource_t *rsc, - pe_working_set_t *data_set, +//! A single instance of a bundle +typedef struct { + int offset; //!< 0-origin index of this instance in bundle + char *ipaddr; //!< IP address associated with this instance + pcmk_node_t *node; //!< Node created for this instance + pcmk_resource_t *ip; //!< IP address resource for ipaddr + pcmk_resource_t *child; //!< Instance of bundled resource + pcmk_resource_t *container; //!< Container associated with this instance + pcmk_resource_t *remote; //!< Pacemaker Remote connection into container +} pe__bundle_replica_t; + +GList *pe__bundle_containers(const pcmk_resource_t *bundle); + +int pe__bundle_max(const pcmk_resource_t *rsc); +bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle, + const pcmk_node_t *node); +pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc); +const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance); +pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle); +void pe__foreach_bundle_replica(pcmk_resource_t *bundle, + bool (*fn)(pe__bundle_replica_t *, void *), + void *user_data); +void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle, + bool (*fn)(const pe__bundle_replica_t *, + void *), + void *user_data); +pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle, + const pcmk_node_t *node); +bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc); +const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc, + pcmk_scheduler_t *scheduler, xmlNode *xml, const char *field); -const char *pe_node_attribute_calculated(const pe_node_t *node, - const char *name, - const pe_resource_t *rsc); -const char *pe_node_attribute_raw(const pe_node_t *node, const char *name); -bool pe__is_universal_clone(const pe_resource_t *rsc, - const pe_working_set_t *data_set); -void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc, - pe_node_t *node, enum pe_check_parameters, - pe_working_set_t *data_set); -void pe__foreach_param_check(pe_working_set_t *data_set, - void (*cb)(pe_resource_t*, pe_node_t*, + +const char *pe__node_attribute_calculated(const pcmk_node_t *node, + const char *name, + const pcmk_resource_t *rsc, + enum pcmk__rsc_node node_type, + bool force_host); +const char *pe_node_attribute_raw(const pcmk_node_t *node, const char 
*name); +bool pe__is_universal_clone(const pcmk_resource_t *rsc, + const pcmk_scheduler_t *scheduler); +void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc, + pcmk_node_t *node, enum pcmk__check_parameters, + pcmk_scheduler_t *scheduler); +void pe__foreach_param_check(pcmk_scheduler_t *scheduler, + void (*cb)(pcmk_resource_t*, pcmk_node_t*, const xmlNode*, - enum pe_check_parameters)); -void pe__free_param_checks(pe_working_set_t *data_set); + enum pcmk__check_parameters)); +void pe__free_param_checks(pcmk_scheduler_t *scheduler); -bool pe__shutdown_requested(const pe_node_t *node); -void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set); +bool pe__shutdown_requested(const pcmk_node_t *node); +void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler, + const char *reason); /*! * \internal @@ -605,53 +580,55 @@ void pe__register_messages(pcmk__output_t *out); void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name, const pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, - gboolean overwrite, pe_working_set_t *data_set); - -bool pe__resource_is_disabled(const pe_resource_t *rsc); -pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, - const pe_node_t *node, - pe_working_set_t *data_set); - -GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name); -GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name); -bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag); -bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag); - -bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node); -bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list); + gboolean overwrite, + pcmk_scheduler_t *scheduler); + +bool pe__resource_is_disabled(const pcmk_resource_t *rsc); +void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node); + +GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name); +GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name); +bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc, + const char *tag); +bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node, + const char *tag); + +bool pe__rsc_running_on_only(const pcmk_resource_t *rsc, + const pcmk_node_t *node); +bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list); GList *pe__filter_rsc_list(GList *rscs, GList *filter); -GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s); -GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s); +GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s); +GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s); -bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node); +bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node); -gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc, +gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); -gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc, +gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); -gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc, +gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); -gboolean 
pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc, +gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); -xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name); +xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name); -const char *pe__clone_child_id(const pe_resource_t *rsc); +const char *pe__clone_child_id(const pcmk_resource_t *rsc); -int pe__sum_node_health_scores(const pe_node_t *node, int base_health); -int pe__node_health(pe_node_t *node); +int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health); +int pe__node_health(pcmk_node_t *node); static inline enum pcmk__health_strategy -pe__health_strategy(pe_working_set_t *data_set) +pe__health_strategy(pcmk_scheduler_t *scheduler) { - return pcmk__parse_health_strategy(pe_pref(data_set->config_hash, + return pcmk__parse_health_strategy(pe_pref(scheduler->config_hash, PCMK__OPT_NODE_HEALTH_STRATEGY)); } static inline int -pe__health_score(const char *option, pe_working_set_t *data_set) +pe__health_score(const char *option, pcmk_scheduler_t *scheduler) { - return char2score(pe_pref(data_set->config_hash, option)); + return char2score(pe_pref(scheduler->config_hash, option)); } /*! @@ -665,7 +642,7 @@ pe__health_score(const char *option, pe_working_set_t *data_set) * if node has neither a name nor ID. */ static inline const char * -pe__node_name(const pe_node_t *node) +pe__node_name(const pcmk_node_t *node) { if (node == NULL) { return "unspecified node"; @@ -691,7 +668,7 @@ pe__node_name(const pe_node_t *node) * \return true if \p node1 and \p node2 refer to the same node */ static inline bool -pe__same_node(const pe_node_t *node1, const pe_node_t *node2) +pe__same_node(const pcmk_node_t *node1, const pcmk_node_t *node2) { return (node1 != NULL) && (node2 != NULL) && (node1->details == node2->details); diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h index cc626c8..24355f8 100644 --- a/include/crm/pengine/pe_types.h +++ b/include/crm/pengine/pe_types.h @@ -16,6 +16,7 @@ # include // xmlNode # include // gboolean, guint, GList, GHashTable # include +# include # include #ifdef __cplusplus @@ -28,535 +29,6 @@ extern "C" { * \ingroup pengine */ -typedef struct pe_node_s pe_node_t; -typedef struct pe_action_s pe_action_t; -typedef struct pe_resource_s pe_resource_t; -typedef struct pe_working_set_s pe_working_set_t; - -enum pe_obj_types { - pe_unknown = -1, - pe_native = 0, - pe_group = 1, - pe_clone = 2, - pe_container = 3, -}; - -typedef struct resource_object_functions_s { - gboolean (*unpack) (pe_resource_t*, pe_working_set_t*); - pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search, - const pe_node_t *node, int flags); - /* parameter result must be free'd */ - char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*, - pe_working_set_t*); - //! \deprecated will be removed in a future release - void (*print) (pe_resource_t*, const char*, long, void*); - gboolean (*active) (pe_resource_t*, gboolean); - enum rsc_role_e (*state) (const pe_resource_t*, gboolean); - pe_node_t *(*location) (const pe_resource_t*, GList**, int); - void (*free) (pe_resource_t*); - void (*count) (pe_resource_t*); - gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean); - - /*! 
- * \brief - * \internal Find a node (and optionally count all) where resource is active - * - * \param[in] rsc Resource to check - * \param[out] count_all If not NULL, set this to count of active nodes - * \param[out] count_clean If not NULL, set this to count of clean nodes - * - * \return A node where the resource is active, preferring the source node - * if the resource is involved in a partial migration or a clean, - * online node if the resource's "requires" is "quorum" or - * "nothing", or NULL if the resource is inactive. - */ - pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all, - unsigned int *count_clean); -} resource_object_functions_t; - -typedef struct resource_alloc_functions_s resource_alloc_functions_t; - -enum pe_quorum_policy { - no_quorum_freeze, - no_quorum_stop, - no_quorum_ignore, - no_quorum_suicide, - no_quorum_demote -}; - -enum node_type { - node_ping, //! \deprecated Do not use - node_member, - node_remote -}; - -//! \deprecated will be removed in a future release -enum pe_restart { - pe_restart_restart, //! \deprecated will be removed in a future release - pe_restart_ignore //! \deprecated will be removed in a future release -}; - -//! Determine behavior of pe_find_resource_with_flags() -enum pe_find { - pe_find_renamed = 0x001, //!< match resource ID or LRM history ID - pe_find_anon = 0x002, //!< match base name of anonymous clone instances - pe_find_clone = 0x004, //!< match only clone instances - pe_find_current = 0x008, //!< match resource active on specified node - pe_find_inactive = 0x010, //!< match resource not running anywhere - pe_find_any = 0x020, //!< match base name of any clone instance -}; - -// @TODO Make these an enum - -# define pe_flag_have_quorum 0x00000001ULL -# define pe_flag_symmetric_cluster 0x00000002ULL -# define pe_flag_maintenance_mode 0x00000008ULL - -# define pe_flag_stonith_enabled 0x00000010ULL -# define pe_flag_have_stonith_resource 0x00000020ULL -# define pe_flag_enable_unfencing 0x00000040ULL -# define pe_flag_concurrent_fencing 0x00000080ULL - -# define pe_flag_stop_rsc_orphans 0x00000100ULL -# define pe_flag_stop_action_orphans 0x00000200ULL -# define pe_flag_stop_everything 0x00000400ULL - -# define pe_flag_start_failure_fatal 0x00001000ULL - -//! \deprecated -# define pe_flag_remove_after_stop 0x00002000ULL - -# define pe_flag_startup_fencing 0x00004000ULL -# define pe_flag_shutdown_lock 0x00008000ULL - -# define pe_flag_startup_probes 0x00010000ULL -# define pe_flag_have_status 0x00020000ULL -# define pe_flag_have_remote_nodes 0x00040000ULL - -# define pe_flag_quick_location 0x00100000ULL -# define pe_flag_sanitized 0x00200000ULL - -//! \deprecated -# define pe_flag_stdout 0x00400000ULL - -//! Don't count total, disabled and blocked resource instances -# define pe_flag_no_counts 0x00800000ULL - -/*! Skip deprecated code that is kept solely for backward API compatibility. - * (Internal code should always set this.) - */ -# define pe_flag_no_compat 0x01000000ULL - -# define pe_flag_show_scores 0x02000000ULL -# define pe_flag_show_utilization 0x04000000ULL - -/*! - * When scheduling, only unpack the CIB (including constraints), calculate - * as much cluster status as possible, and apply node health. 
- */ -# define pe_flag_check_config 0x08000000ULL - -struct pe_working_set_s { - xmlNode *input; - crm_time_t *now; - - /* options extracted from the input */ - char *dc_uuid; - pe_node_t *dc_node; - const char *stonith_action; - const char *placement_strategy; - - unsigned long long flags; - - int stonith_timeout; - enum pe_quorum_policy no_quorum_policy; - - GHashTable *config_hash; - GHashTable *tickets; - - // Actions for which there can be only one (e.g. fence nodeX) - GHashTable *singletons; - - GList *nodes; - GList *resources; - GList *placement_constraints; - GList *ordering_constraints; - GList *colocation_constraints; - GList *ticket_constraints; - - GList *actions; - xmlNode *failed; - xmlNode *op_defaults; - xmlNode *rsc_defaults; - - /* stats */ - int num_synapse; - int max_valid_nodes; //! Deprecated (will be removed in a future release) - int order_id; - int action_id; - - /* final output */ - xmlNode *graph; - - GHashTable *template_rsc_sets; - const char *localhost; - GHashTable *tags; - - int blocked_resources; - int disabled_resources; - - GList *param_check; // History entries that need to be checked - GList *stop_needed; // Containers that need stop actions - time_t recheck_by; // Hint to controller to re-run scheduler by this time - int ninstances; // Total number of resource instances - guint shutdown_lock;// How long (seconds) to lock resources to shutdown node - int priority_fencing_delay; // Priority fencing delay - - void *priv; -}; - -enum pe_check_parameters { - /* Clear fail count if parameters changed for un-expired start or monitor - * last_failure. - */ - pe_check_last_failure, - - /* Clear fail count if parameters changed for start, monitor, promote, or - * migrate_from actions for active resources. - */ - pe_check_active, -}; - -struct pe_node_shared_s { - const char *id; - const char *uname; - enum node_type type; - - /* @TODO convert these flags into a bitfield */ - gboolean online; - gboolean standby; - gboolean standby_onfail; - gboolean pending; - gboolean unclean; - gboolean unseen; - gboolean shutdown; - gboolean expected_up; - gboolean is_dc; - gboolean maintenance; - gboolean rsc_discovery_enabled; - gboolean remote_requires_reset; - gboolean remote_was_fenced; - gboolean remote_maintenance; /* what the remote-rsc is thinking */ - gboolean unpacked; - - int num_resources; - pe_resource_t *remote_rsc; - GList *running_rsc; /* pe_resource_t* */ - GList *allocated_rsc; /* pe_resource_t* */ - - GHashTable *attrs; /* char* => char* */ - GHashTable *utilization; - GHashTable *digest_cache; //!< cache of calculated resource digests - int priority; // calculated based on the priority of resources running on the node - pe_working_set_t *data_set; //!< Cluster that this node is part of -}; - -struct pe_node_s { - int weight; - gboolean fixed; //!< \deprecated Will be removed in a future release - int count; - struct pe_node_shared_s *details; - int rsc_discover_mode; -}; - -# define pe_rsc_orphan 0x00000001ULL -# define pe_rsc_managed 0x00000002ULL -# define pe_rsc_block 0x00000004ULL -# define pe_rsc_orphan_container_filler 0x00000008ULL - -# define pe_rsc_notify 0x00000010ULL -# define pe_rsc_unique 0x00000020ULL -# define pe_rsc_fence_device 0x00000040ULL -# define pe_rsc_promotable 0x00000080ULL - -# define pe_rsc_provisional 0x00000100ULL -# define pe_rsc_allocating 0x00000200ULL -# define pe_rsc_merging 0x00000400ULL -# define pe_rsc_restarting 0x00000800ULL - -# define pe_rsc_stop 0x00001000ULL -# define pe_rsc_reload 0x00002000ULL -# define 
pe_rsc_allow_remote_remotes 0x00004000ULL -# define pe_rsc_critical 0x00008000ULL - -# define pe_rsc_failed 0x00010000ULL -# define pe_rsc_detect_loop 0x00020000ULL -# define pe_rsc_runnable 0x00040000ULL -# define pe_rsc_start_pending 0x00080000ULL - -//!< \deprecated Do not use -# define pe_rsc_starting 0x00100000ULL - -//!< \deprecated Do not use -# define pe_rsc_stopping 0x00200000ULL - -# define pe_rsc_stop_unexpected 0x00400000ULL -# define pe_rsc_allow_migrate 0x00800000ULL - -# define pe_rsc_failure_ignored 0x01000000ULL -# define pe_rsc_replica_container 0x02000000ULL -# define pe_rsc_maintenance 0x04000000ULL -# define pe_rsc_is_container 0x08000000ULL - -# define pe_rsc_needs_quorum 0x10000000ULL -# define pe_rsc_needs_fencing 0x20000000ULL -# define pe_rsc_needs_unfencing 0x40000000ULL - -/* *INDENT-OFF* */ -enum pe_action_flags { - pe_action_pseudo = 0x00001, - pe_action_runnable = 0x00002, - pe_action_optional = 0x00004, - pe_action_print_always = 0x00008, - - pe_action_have_node_attrs = 0x00010, - pe_action_implied_by_stonith = 0x00040, - pe_action_migrate_runnable = 0x00080, - - pe_action_dumped = 0x00100, - pe_action_processed = 0x00200, -#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) - pe_action_clear = 0x00400, //! \deprecated Unused -#endif - pe_action_dangle = 0x00800, - - /* This action requires one or more of its dependencies to be runnable. - * We use this to clear the runnable flag before checking dependencies. - */ - pe_action_requires_any = 0x01000, - - pe_action_reschedule = 0x02000, - pe_action_tracking = 0x04000, - pe_action_dedup = 0x08000, //! Internal state tracking when creating graph - - pe_action_dc = 0x10000, //! Action may run on DC instead of target -}; -/* *INDENT-ON* */ - -struct pe_resource_s { - char *id; - char *clone_name; - xmlNode *xml; - xmlNode *orig_xml; - xmlNode *ops_xml; - - pe_working_set_t *cluster; - pe_resource_t *parent; - - enum pe_obj_types variant; - void *variant_opaque; - resource_object_functions_t *fns; - resource_alloc_functions_t *cmds; - - enum rsc_recovery_type recovery_type; - - enum pe_restart restart_type; //!< \deprecated will be removed in future release - - int priority; - int stickiness; - int sort_index; - int failure_timeout; - int migration_threshold; - guint remote_reconnect_ms; - char *pending_task; - - unsigned long long flags; - - // @TODO merge these into flags - gboolean is_remote_node; - gboolean exclusive_discover; - - /* Pay special attention to whether you want to use rsc_cons_lhs and - * rsc_cons directly, which include only colocations explicitly involving - * this resource, or call libpacemaker's pcmk__with_this_colocations() and - * pcmk__this_with_colocations() functions, which may return relevant - * colocations involving the resource's ancestors as well. - */ - - //!@{ - //! This field should be treated as internal to Pacemaker - GList *rsc_cons_lhs; // List of pcmk__colocation_t* - GList *rsc_cons; // List of pcmk__colocation_t* - GList *rsc_location; // List of pe__location_t* - GList *actions; // List of pe_action_t* - GList *rsc_tickets; // List of rsc_ticket* - //!@} - - pe_node_t *allocated_to; - pe_node_t *partial_migration_target; - pe_node_t *partial_migration_source; - GList *running_on; /* pe_node_t* */ - GHashTable *known_on; /* pe_node_t* */ - GHashTable *allowed_nodes; /* pe_node_t* */ - - enum rsc_role_e role; - enum rsc_role_e next_role; - - GHashTable *meta; - GHashTable *parameters; //! 
\deprecated Use pe_rsc_params() instead - GHashTable *utilization; - - GList *children; /* pe_resource_t* */ - GList *dangling_migrations; /* pe_node_t* */ - - pe_resource_t *container; - GList *fillers; - - // @COMPAT These should be made const at next API compatibility break - pe_node_t *pending_node; // Node on which pending_task is happening - pe_node_t *lock_node; // Resource is shutdown-locked to this node - - time_t lock_time; // When shutdown lock started - - /* Resource parameters may have node-attribute-based rules, which means the - * values can vary by node. This table is a cache of parameter name/value - * tables for each node (as needed). Use pe_rsc_params() to get the table - * for a given node. - */ - GHashTable *parameter_cache; // Key = node name, value = parameters table -}; - -struct pe_action_s { - int id; - int priority; - - pe_resource_t *rsc; - pe_node_t *node; - xmlNode *op_entry; - - char *task; - char *uuid; - char *cancel_task; - char *reason; - - enum pe_action_flags flags; - enum rsc_start_requirement needs; - enum action_fail_response on_fail; - enum rsc_role_e fail_role; - - GHashTable *meta; - GHashTable *extra; - - /* - * These two varables are associated with the constraint logic - * that involves first having one or more actions runnable before - * then allowing this action to execute. - * - * These varables are used with features such as 'clone-min' which - * requires at minimum X number of cloned instances to be running - * before an order dependency can run. Another option that uses - * this is 'require-all=false' in ordering constrants. This option - * says "only require one instance of a resource to start before - * allowing dependencies to start" -- basically, require-all=false is - * the same as clone-min=1. - */ - - /* current number of known runnable actions in the before list. */ - int runnable_before; - /* the number of "before" runnable actions required for this action - * to be considered runnable */ - int required_runnable_before; - - GList *actions_before; /* pe_action_wrapper_t* */ - GList *actions_after; /* pe_action_wrapper_t* */ - - /* Some of the above fields could be moved to the details, - * except for API backward compatibility. - */ - void *action_details; // varies by type of action -}; - -typedef struct pe_ticket_s { - char *id; - gboolean granted; - time_t last_granted; - gboolean standby; - GHashTable *state; -} pe_ticket_t; - -typedef struct pe_tag_s { - char *id; - GList *refs; -} pe_tag_t; - -//! Internal tracking for transition graph creation -enum pe_link_state { - pe_link_not_dumped, //! Internal tracking for transition graph creation - pe_link_dumped, //! Internal tracking for transition graph creation - pe_link_dup, //! \deprecated No longer used by Pacemaker -}; - -enum pe_discover_e { - pe_discover_always = 0, - pe_discover_never, - pe_discover_exclusive, -}; - -/* *INDENT-OFF* */ -enum pe_ordering { - pe_order_none = 0x0, /* deleted */ - pe_order_optional = 0x1, /* pure ordering, nothing implied */ - pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */ - - pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */ - pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */ - pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */ - - /* first requires then to be both runnable and migrate runnable. 
*/ - pe_order_implies_first_migratable = 0x80, - - pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ - - pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */ - pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX', - * ensure instances of 'then' on 'nodeX' are too. - * Only really useful if 'then' is a clone and 'first' is not - */ - pe_order_probe = 0x800, /* If 'first->rsc' is - * - running but about to stop, ignore the constraint - * - otherwise, behave as runnable_left - */ - - pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ - pe_order_stonith_stop = 0x2000, // #endif diff --git a/include/crm/pengine/pe_types_compat.h b/include/crm/pengine/pe_types_compat.h index 6f174c4..1becd12 100644 --- a/include/crm/pengine/pe_types_compat.h +++ b/include/crm/pengine/pe_types_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,7 +10,7 @@ #ifndef PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H # define PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H -#include +#include #ifdef __cplusplus extern "C" { @@ -25,6 +25,174 @@ extern "C" { * release. */ +//! \deprecated Use pcmk_rsc_removed instead +#define pe_rsc_orphan pcmk_rsc_removed + +//! \deprecated Use pcmk_rsc_managed instead +#define pe_rsc_managed pcmk_rsc_managed + +//! \deprecated Use pcmk_rsc_blocked instead +#define pe_rsc_block pcmk_rsc_blocked + +//! \deprecated Use pcmk_rsc_removed_filler instead +#define pe_rsc_orphan_container_filler pcmk_rsc_removed_filler + +//! \deprecated Use pcmk_rsc_notify instead +#define pe_rsc_notify pcmk_rsc_notify + +//! \deprecated Use pcmk_rsc_unique instead +#define pe_rsc_unique pcmk_rsc_unique + +//! \deprecated Use pcmk_rsc_fence_device instead +#define pe_rsc_fence_device pcmk_rsc_fence_device + +//! \deprecated Use pcmk_rsc_promotable instead +#define pe_rsc_promotable pcmk_rsc_promotable + +//! \deprecated Use pcmk_rsc_unassigned instead +#define pe_rsc_provisional pcmk_rsc_unassigned + +//! \deprecated Use pcmk_rsc_assigning instead +#define pe_rsc_allocating pcmk_rsc_assigning + +//! \deprecated Use pcmk_rsc_updating_nodes instead +#define pe_rsc_merging pcmk_rsc_updating_nodes + +//! \deprecated Use pcmk_rsc_restarting instead +#define pe_rsc_restarting pcmk_rsc_restarting + +//! \deprecated Use pcmk_rsc_stop_if_failed instead +#define pe_rsc_stop pcmk_rsc_stop_if_failed + +//! \deprecated Use pcmk_rsc_reload instead +#define pe_rsc_reload pcmk_rsc_reload + +//! \deprecated Use pcmk_rsc_remote_nesting_allowed instead +#define pe_rsc_allow_remote_remotes pcmk_rsc_remote_nesting_allowed + +//! \deprecated Use pcmk_rsc_critical instead +#define pe_rsc_critical pcmk_rsc_critical + +//! \deprecated Use pcmk_rsc_failed instead +#define pe_rsc_failed pcmk_rsc_failed + +//! \deprecated Use pcmk_rsc_detect_loop instead +#define pe_rsc_detect_loop pcmk_rsc_detect_loop + +//! \deprecated Do not use +#define pe_rsc_runnable pcmk_rsc_runnable + +//! \deprecated Use pcmk_rsc_start_pending instead +#define pe_rsc_start_pending pcmk_rsc_start_pending + +//!< \deprecated Do not use +#define pe_rsc_starting pcmk_rsc_starting + +//!< \deprecated Do not use +#define pe_rsc_stopping pcmk_rsc_stopping + +//! \deprecated Use pcmk_rsc_stop_unexpected instead +#define pe_rsc_stop_unexpected pcmk_rsc_stop_unexpected + +//! 
\deprecated Use pcmk_rsc_migratable instead +#define pe_rsc_allow_migrate pcmk_rsc_migratable + +//! \deprecated Use pcmk_rsc_ignore_failure instead +#define pe_rsc_failure_ignored pcmk_rsc_ignore_failure + +//! \deprecated Use pcmk_rsc_replica_container instead +#define pe_rsc_replica_container pcmk_rsc_replica_container + +//! \deprecated Use pcmk_rsc_maintenance instead +#define pe_rsc_maintenance pcmk_rsc_maintenance + +//! \deprecated Do not use +#define pe_rsc_is_container pcmk_rsc_has_filler + +//! \deprecated Use pcmk_rsc_needs_quorum instead +#define pe_rsc_needs_quorum pcmk_rsc_needs_quorum + +//! \deprecated Use pcmk_rsc_needs_fencing instead +#define pe_rsc_needs_fencing pcmk_rsc_needs_fencing + +//! \deprecated Use pcmk_rsc_needs_unfencing instead +#define pe_rsc_needs_unfencing pcmk_rsc_needs_unfencing + +//! \deprecated Use pcmk_sched_quorate instead +#define pe_flag_have_quorum pcmk_sched_quorate + +//! \deprecated Use pcmk_sched_symmetric_cluster instead +#define pe_flag_symmetric_cluster pcmk_sched_symmetric_cluster + +//! \deprecated Use pcmk_sched_in_maintenance instead +#define pe_flag_maintenance_mode pcmk_sched_in_maintenance + +//! \deprecated Use pcmk_sched_fencing_enabled instead +#define pe_flag_stonith_enabled pcmk_sched_fencing_enabled + +//! \deprecated Use pcmk_sched_have_fencing instead +#define pe_flag_have_stonith_resource pcmk_sched_have_fencing + +//! \deprecated Use pcmk_sched_enable_unfencing instead +#define pe_flag_enable_unfencing pcmk_sched_enable_unfencing + +//! \deprecated Use pcmk_sched_concurrent_fencing instead +#define pe_flag_concurrent_fencing pcmk_sched_concurrent_fencing + +//! \deprecated Use pcmk_sched_stop_removed_resources instead +#define pe_flag_stop_rsc_orphans pcmk_sched_stop_removed_resources + +//! \deprecated Use pcmk_sched_cancel_removed_actions instead +#define pe_flag_stop_action_orphans pcmk_sched_cancel_removed_actions + +//! \deprecated Use pcmk_sched_stop_all instead +#define pe_flag_stop_everything pcmk_sched_stop_all + +//! \deprecated Use pcmk_sched_start_failure_fatal instead +#define pe_flag_start_failure_fatal pcmk_sched_start_failure_fatal + +//! \deprecated Do not use +#define pe_flag_remove_after_stop pcmk_sched_remove_after_stop + +//! \deprecated Use pcmk_sched_startup_fencing instead +#define pe_flag_startup_fencing pcmk_sched_startup_fencing + +//! \deprecated Use pcmk_sched_shutdown_lock instead +#define pe_flag_shutdown_lock pcmk_sched_shutdown_lock + +//! \deprecated Use pcmk_sched_probe_resources instead +#define pe_flag_startup_probes pcmk_sched_probe_resources + +//! \deprecated Use pcmk_sched_have_status instead +#define pe_flag_have_status pcmk_sched_have_status + +//! \deprecated Use pcmk_sched_have_remote_nodes instead +#define pe_flag_have_remote_nodes pcmk_sched_have_remote_nodes + +//! \deprecated Use pcmk_sched_location_only instead +#define pe_flag_quick_location pcmk_sched_location_only + +//! \deprecated Use pcmk_sched_sanitized instead +#define pe_flag_sanitized pcmk_sched_sanitized + +//! \deprecated Do not use +#define pe_flag_stdout (1ULL << 22) + +//! \deprecated Use pcmk_sched_no_counts instead +#define pe_flag_no_counts pcmk_sched_no_counts + +//! \deprecated Use pcmk_sched_no_compat instead +#define pe_flag_no_compat pcmk_sched_no_compat + +//! \deprecated Use pcmk_sched_output_scores instead +#define pe_flag_show_scores pcmk_sched_output_scores + +//! \deprecated Use pcmk_sched_show_utilization instead +#define pe_flag_show_utilization pcmk_sched_show_utilization + +//! 
\deprecated Use pcmk_sched_validate_only instead +#define pe_flag_check_config pcmk_sched_validate_only + //!@{ //! \deprecated Do not use (unused by Pacemaker) enum pe_graph_flags { @@ -35,27 +203,62 @@ enum pe_graph_flags { }; //!@} -//!< \deprecated Use pe_action_t instead +//!@{ +//! \deprecated Do not use +enum pe_check_parameters { + pe_check_last_failure, + pe_check_active, +}; +//!@} + +//! \deprecated Use pcmk_action_t instead typedef struct pe_action_s action_t; -//!< \deprecated Use pe_action_wrapper_t instead +//! \deprecated Use pcmk_action_t instead +typedef struct pe_action_s pe_action_t; + +//! \deprecated Do not use typedef struct pe_action_wrapper_s action_wrapper_t; -//!< \deprecated Use pe_node_t instead +//! \deprecated Do not use +typedef struct pe_action_wrapper_s pe_action_wrapper_t; + +//! \deprecated Use pcmk_node_t instead typedef struct pe_node_s node_t; -//!< \deprecated Use enum pe_quorum_policy instead +//! \deprecated Use pcmk_node_t instead +typedef struct pe_node_s pe_node_t; + +//! \deprecated Use enum pe_quorum_policy instead typedef enum pe_quorum_policy no_quorum_policy_t; -//!< \deprecated use pe_resource_t instead +//! \deprecated use pcmk_resource_t instead typedef struct pe_resource_s resource_t; -//!< \deprecated Use pe_tag_t instead +//! \deprecated use pcmk_resource_t instead +typedef struct pe_resource_s pe_resource_t; + +//! \deprecated Use pcmk_tag_t instead typedef struct pe_tag_s tag_t; -//!< \deprecated Use pe_ticket_t instead +//! \deprecated Use pcmk_tag_t instead +typedef struct pe_tag_s pe_tag_t; + +//! \deprecated Use pcmk_ticket_t instead typedef struct pe_ticket_s ticket_t; +//! \deprecated Use pcmk_ticket_t instead +typedef struct pe_ticket_s pe_ticket_t; + +//! \deprecated Use pcmk_scheduler_t instead +typedef struct pe_working_set_s pe_working_set_t; + +//! \deprecated This type should be treated as internal to Pacemaker +typedef struct resource_alloc_functions_s resource_alloc_functions_t; + +//! \deprecated Use pcmk_rsc_methods_t instead +typedef struct resource_object_functions_s resource_object_functions_t; + #ifdef __cplusplus } #endif diff --git a/include/crm/pengine/remote_internal.h b/include/crm/pengine/remote_internal.h index 46d58fc..0e7c044 100644 --- a/include/crm/pengine/remote_internal.h +++ b/include/crm/pengine/remote_internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2013-2019 the Pacemaker project contributors + * Copyright 2013-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -19,16 +19,17 @@ extern "C" { #include bool xml_contains_remote_node(xmlNode *xml); -bool pe__is_remote_node(const pe_node_t *node); -bool pe__is_guest_node(const pe_node_t *node); -bool pe__is_guest_or_remote_node(const pe_node_t *node); -bool pe__is_bundle_node(const pe_node_t *node); -bool pe__resource_is_remote_conn(const pe_resource_t *rsc, - const pe_working_set_t *data_set); -pe_resource_t *pe__resource_contains_guest_node(const pe_working_set_t *data_set, - const pe_resource_t *rsc); -void pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host, - void (*helper)(const pe_node_t*, void*), void *user_data); +bool pe__is_remote_node(const pcmk_node_t *node); +bool pe__is_guest_node(const pcmk_node_t *node); +bool pe__is_guest_or_remote_node(const pcmk_node_t *node); +bool pe__is_bundle_node(const pcmk_node_t *node); +bool pe__resource_is_remote_conn(const pcmk_resource_t *rsc); +pcmk_resource_t *pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler, + const pcmk_resource_t *rsc); +void pe_foreach_guest_node(const pcmk_scheduler_t *scheduler, + const pcmk_node_t *host, + void (*helper)(const pcmk_node_t*, void*), + void *user_data); xmlNode *pe_create_remote_xml(xmlNode *parent, const char *uname, const char *container_id, const char *migrateable, const char *is_managed, const char *start_timeout, diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h index 145a166..9c85425 100644 --- a/include/crm/pengine/status.h +++ b/include/crm/pengine/status.h @@ -15,7 +15,7 @@ # include // pcmk_is_set() # include # include -# include // pe_node_t, pe_resource_t, etc. +# include // pcmk_node_t, pcmk_resource_t, etc. # include #ifdef __cplusplus @@ -28,24 +28,25 @@ extern "C" { * \ingroup pengine */ -const char *rsc_printable_id(const pe_resource_t *rsc); -gboolean cluster_status(pe_working_set_t * data_set); -pe_working_set_t *pe_new_working_set(void); -void pe_free_working_set(pe_working_set_t *data_set); -void set_working_set_defaults(pe_working_set_t * data_set); -void cleanup_calculations(pe_working_set_t * data_set); -void pe_reset_working_set(pe_working_set_t *data_set); -pe_resource_t *pe_find_resource(GList *rsc_list, const char *id_rh); -pe_resource_t *pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags); -pe_node_t *pe_find_node(const GList *node_list, const char *node_name); -pe_node_t *pe_find_node_id(const GList *node_list, const char *id); -pe_node_t *pe_find_node_any(const GList *node_list, const char *id, +const char *rsc_printable_id(const pcmk_resource_t *rsc); +gboolean cluster_status(pcmk_scheduler_t *scheduler); +pcmk_scheduler_t *pe_new_working_set(void); +void pe_free_working_set(pcmk_scheduler_t *scheduler); +void set_working_set_defaults(pcmk_scheduler_t *scheduler); +void cleanup_calculations(pcmk_scheduler_t *scheduler); +void pe_reset_working_set(pcmk_scheduler_t *scheduler); +pcmk_resource_t *pe_find_resource(GList *rsc_list, const char *id_rh); +pcmk_resource_t *pe_find_resource_with_flags(GList *rsc_list, const char *id, + enum pe_find flags); +pcmk_node_t *pe_find_node(const GList *node_list, const char *node_name); +pcmk_node_t *pe_find_node_id(const GList *node_list, const char *id); +pcmk_node_t *pe_find_node_any(const GList *node_list, const char *id, const char *node_name); GList *find_operations(const char *rsc, const char *node, gboolean active_filter, - pe_working_set_t * data_set); + pcmk_scheduler_t *scheduler); void calculate_active_ops(const GList *sorted_op_list, int 
*start_index, int *stop_index); -int pe_bundle_replicas(const pe_resource_t *rsc); +int pe_bundle_replicas(const pcmk_resource_t *rsc); /*! * \brief Check whether a resource is any clone type @@ -55,9 +56,9 @@ int pe_bundle_replicas(const pe_resource_t *rsc); * \return true if resource is clone, false otherwise */ static inline bool -pe_rsc_is_clone(const pe_resource_t *rsc) +pe_rsc_is_clone(const pcmk_resource_t *rsc) { - return rsc && (rsc->variant == pe_clone); + return (rsc != NULL) && (rsc->variant == pcmk_rsc_variant_clone); } /*! @@ -68,9 +69,9 @@ pe_rsc_is_clone(const pe_resource_t *rsc) * \return true if resource is unique clone, false otherwise */ static inline bool -pe_rsc_is_unique_clone(const pe_resource_t *rsc) +pe_rsc_is_unique_clone(const pcmk_resource_t *rsc) { - return pe_rsc_is_clone(rsc) && pcmk_is_set(rsc->flags, pe_rsc_unique); + return pe_rsc_is_clone(rsc) && pcmk_is_set(rsc->flags, pcmk_rsc_unique); } /*! @@ -81,9 +82,9 @@ pe_rsc_is_unique_clone(const pe_resource_t *rsc) * \return true if resource is anonymous clone, false otherwise */ static inline bool -pe_rsc_is_anon_clone(const pe_resource_t *rsc) +pe_rsc_is_anon_clone(const pcmk_resource_t *rsc) { - return pe_rsc_is_clone(rsc) && !pcmk_is_set(rsc->flags, pe_rsc_unique); + return pe_rsc_is_clone(rsc) && !pcmk_is_set(rsc->flags, pcmk_rsc_unique); } /*! @@ -94,7 +95,7 @@ pe_rsc_is_anon_clone(const pe_resource_t *rsc) * \return true if resource is part of a bundle, false otherwise */ static inline bool -pe_rsc_is_bundled(const pe_resource_t *rsc) +pe_rsc_is_bundled(const pcmk_resource_t *rsc) { if (rsc == NULL) { return false; @@ -102,7 +103,7 @@ pe_rsc_is_bundled(const pe_resource_t *rsc) while (rsc->parent != NULL) { rsc = rsc->parent; } - return rsc->variant == pe_container; + return rsc->variant == pcmk_rsc_variant_bundle; } #ifdef __cplusplus diff --git a/include/crm/services_compat.h b/include/crm/services_compat.h index 97310f4..456d351 100644 --- a/include/crm/services_compat.h +++ b/include/crm/services_compat.h @@ -1,5 +1,5 @@ /* - * Copyright 2010-2022 the Pacemaker project contributors + * Copyright 2010-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -10,7 +10,7 @@ #ifndef PCMK__CRM_SERVICES_COMPAT__H # define PCMK__CRM_SERVICES_COMPAT__H - +#include #include #include #include @@ -68,7 +68,8 @@ static inline enum ocf_exitcode services_get_ocf_exitcode(const char *action, int lsb_exitcode) { /* For non-status actions, LSB and OCF share error code meaning <= 7 */ - if (action && strcmp(action, "status") && strcmp(action, "monitor")) { + if ((action != NULL) && (strcmp(action, PCMK_ACTION_STATUS) != 0) + && (strcmp(action, PCMK_ACTION_MONITOR) != 0)) { if ((lsb_exitcode < 0) || (lsb_exitcode > PCMK_LSB_NOT_RUNNING)) { return PCMK_OCF_UNKNOWN_ERROR; } diff --git a/include/crm_internal.h b/include/crm_internal.h index 5f6531f..71a0f7e 100644 --- a/include/crm_internal.h +++ b/include/crm_internal.h @@ -81,9 +81,14 @@ #define PCMK__XA_CONFIG_ERRORS "config-errors" #define PCMK__XA_CONFIG_WARNINGS "config-warnings" #define PCMK__XA_CONFIRM "confirm" +#define PCMK__XA_CRMD "crmd" +#define PCMK__XA_EXPECTED "expected" #define PCMK__XA_GRAPH_ERRORS "graph-errors" #define PCMK__XA_GRAPH_WARNINGS "graph-warnings" +#define PCMK__XA_IN_CCM "in_ccm" +#define PCMK__XA_JOIN "join" #define PCMK__XA_MODE "mode" +#define PCMK__XA_NODE_START_STATE "node_start_state" #define PCMK__XA_TASK "task" #define PCMK__XA_UPTIME "uptime" #define PCMK__XA_CONN_HOST "connection_host" diff --git a/include/pacemaker-internal.h b/include/pacemaker-internal.h index 8610d1e..9e6ff21 100644 --- a/include/pacemaker-internal.h +++ b/include/pacemaker-internal.h @@ -1,5 +1,5 @@ /* - * Copyright 2019-2022 the Pacemaker project contributors + * Copyright 2019-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,14 +11,13 @@ # define PACEMAKER_INTERNAL__H # include +# include # include # include # include # include # include # include -# include -# include # include # include # include diff --git a/include/pacemaker.h b/include/pacemaker.h index f5c375a..ffa99ff 100644 --- a/include/pacemaker.h +++ b/include/pacemaker.h @@ -12,8 +12,8 @@ # include # include +# include # include -# include # include @@ -202,13 +202,13 @@ int pcmk_pacemakerd_status(xmlNodePtr *xml, const char *ipc_name, * \param[in,out] rsc Resource to calculate digests for * \param[in] node Node whose operation history should be used * \param[in] overrides Hash table of configuration parameters to override - * \param[in] data_set Cluster working set (with status) + * \param[in] scheduler Scheduler data (with status) * * \return Standard Pacemaker return code */ -int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, - const pe_node_t *node, GHashTable *overrides, - pe_working_set_t *data_set); +int pcmk_resource_digests(xmlNodePtr *xml, pcmk_resource_t *rsc, + const pcmk_node_t *node, GHashTable *overrides, + pcmk_scheduler_t *scheduler); /*! * \brief Simulate a cluster's response to events @@ -219,7 +219,7 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, * simulation. Output can be modified with various flags. 
* * \param[in,out] xml The destination for the result, as an XML tree - * \param[in,out] data_set Working set for the cluster + * \param[in,out] scheduler Scheduler data * \param[in] injections A structure containing cluster events * (node up/down, tickets, injected operations) * \param[in] flags A bitfield of :pcmk_sim_flags to modify @@ -238,7 +238,7 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc, * * \return Standard Pacemaker return code */ -int pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set, +int pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler, const pcmk_injections_t *injections, unsigned int flags, unsigned int section_opts, const char *use_date, const char *input_file, const char *graph_file, @@ -337,6 +337,45 @@ int pcmk_show_result_code(xmlNodePtr *xml, int code, enum pcmk_result_type type, int pcmk_list_result_codes(xmlNodePtr *xml, enum pcmk_result_type type, uint32_t flags); +/*! + * \brief List available providers for the given OCF agent + * + * \param[in,out] xml The destination for the result, as an XML tree + * \param[in] agent_spec Resource agent name + * + * \return Standard Pacemaker return code + */ +int pcmk_list_alternatives(xmlNodePtr *xml, const char *agent_spec); + +/*! + * \brief List all agents available for the named standard and/or provider + * + * \param[in,out] xml The destination for the result, as an XML tree + * \param[in] agent_spec STD[:PROV] + * + * \return Standard Pacemaker return code + */ +int pcmk_list_agents(xmlNodePtr *xml, char *agent_spec); + +/*! + * \brief List all available OCF providers for the given agent + * + * \param[in,out] xml The destination for the result, as an XML tree + * \param[in] agent_spec Resource agent name + * + * \return Standard Pacemaker return code + */ +int pcmk_list_providers(xmlNodePtr *xml, const char *agent_spec); + +/*! + * \brief List all available resource agent standards + * + * \param[in,out] xml The destination for the result, as an XML tree + * + * \return Standard Pacemaker return code + */ +int pcmk_list_standards(xmlNodePtr *xml); + #ifdef BUILD_PUBLIC_LIBPACEMAKER /*! diff --git a/include/pcmki/Makefile.am b/include/pcmki/Makefile.am index b379fdb..b9475af 100644 --- a/include/pcmki/Makefile.am +++ b/include/pcmki/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2019-2022 the Pacemaker project contributors +# Copyright 2019-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -9,18 +9,6 @@ MAINTAINERCLEANFILES = Makefile.in -noinst_HEADERS = pcmki_acl.h \ - pcmki_cluster_queries.h \ - pcmki_fence.h \ - pcmki_output.h \ - pcmki_resource.h \ - pcmki_result_code.h \ - pcmki_rule.h \ - pcmki_sched_allocate.h \ - pcmki_sched_utils.h \ - pcmki_scheduler.h \ - pcmki_simulate.h \ - pcmki_status.h \ - pcmki_transition.h +noinst_HEADERS = $(wildcard *.h) .PHONY: $(ARCHIVE_VERSION) diff --git a/include/pcmki/pcmki_agents.h b/include/pcmki/pcmki_agents.h new file mode 100644 index 0000000..eefe3e5 --- /dev/null +++ b/include/pcmki/pcmki_agents.h @@ -0,0 +1,19 @@ +/* + * Copyright 2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
+ */ +#ifndef PCMK__PCMKI_PCMKI_AGENTS__H +#define PCMK__PCMKI_PCMKI_AGENTS__H + +#include + +int pcmk__list_alternatives(pcmk__output_t *out, const char *agent_spec); +int pcmk__list_agents(pcmk__output_t *out, char *agent_spec); +int pcmk__list_providers(pcmk__output_t *out, const char *agent_spec); +int pcmk__list_standards(pcmk__output_t *out); + +#endif /* PCMK__PCMKI_PCMKI_AGENTS__H */ diff --git a/include/pcmki/pcmki_cluster_queries.h b/include/pcmki/pcmki_cluster_queries.h index 776aa27..3fa4c23 100644 --- a/include/pcmki/pcmki_cluster_queries.h +++ b/include/pcmki/pcmki_cluster_queries.h @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the Pacemaker project contributors + * Copyright 2020-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,7 +10,8 @@ #ifndef PCMK__PCMKI_PCMKI_CLUSTER_QUERIES__H # define PCMK__PCMKI_PCMKI_CLUSTER_QUERIES__H -#include // gboolean, GMainLoop, etc. +#include +#include #include #include @@ -19,7 +20,7 @@ // CIB queries int pcmk__list_nodes(pcmk__output_t *out, const char *node_types, - gboolean bash_export); + bool bash_export); // Controller queries int pcmk__controller_status(pcmk__output_t *out, const char *node_name, diff --git a/include/pcmki/pcmki_resource.h b/include/pcmki/pcmki_resource.h index dc8ac69..442bb1f 100644 --- a/include/pcmki/pcmki_resource.h +++ b/include/pcmki/pcmki_resource.h @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the Pacemaker project contributors + * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,10 +11,10 @@ #include +#include #include -#include -int pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc, - const pe_node_t *node, GHashTable *overrides); +int pcmk__resource_digests(pcmk__output_t *out, pcmk_resource_t *rsc, + const pcmk_node_t *node, GHashTable *overrides); #endif /* PCMK__PCMKI_PCMKI_RESOURCE__H */ diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h deleted file mode 100644 index 32044ea..0000000 --- a/include/pcmki/pcmki_sched_allocate.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2004-2023 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
- */ - -#ifndef PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H -# define PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H - -# include -# include -# include -# include -# include -# include -# include -# include - -pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer); -void pcmk__bundle_create_actions(pe_resource_t *rsc); -bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node); -void pcmk__bundle_internal_constraints(pe_resource_t *rsc); -void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint); -enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action, - const pe_node_t *node); -void pcmk__bundle_expand(pe_resource_t *rsc); -void pcmk__bundle_add_utilization(const pe_resource_t *rsc, - const pe_resource_t *orig_rsc, - GList *all_rscs, GHashTable *utilization); -void pcmk__bundle_shutdown_lock(pe_resource_t *rsc); - -void clone_create_actions(pe_resource_t *rsc); -void clone_internal_constraints(pe_resource_t *rsc); -void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint); -enum pe_action_flags clone_action_flags(pe_action_t *action, - const pe_node_t *node); -void clone_expand(pe_resource_t *rsc); -bool clone_create_probe(pe_resource_t *rsc, pe_node_t *node); -void clone_append_meta(const pe_resource_t *rsc, xmlNode *xml); -void pcmk__clone_add_utilization(const pe_resource_t *rsc, - const pe_resource_t *orig_rsc, - GList *all_rscs, GHashTable *utilization); -void pcmk__clone_shutdown_lock(pe_resource_t *rsc); - -void pcmk__log_transition_summary(const char *filename); - -#endif diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h deleted file mode 100644 index 3e6d52f..0000000 --- a/include/pcmki/pcmki_sched_utils.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2004-2023 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
- */ - -#ifndef PCMK__PCMKI_PCMKI_SCHED_UTILS__H -# define PCMK__PCMKI_PCMKI_SCHED_UTILS__H - -#include // bool -#include // GList, GHashTable, gboolean, guint -#include // lrmd_event_data_t -#include // cib_t -#include -#include -#include -#include -#include -#include - -/* Constraint helper functions */ -GList *pcmk__copy_node_list(const GList *list, bool reset); - -int copies_per_node(pe_resource_t * rsc); - -xmlNode *pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *event, - const char *caller_version, int target_rc, - const char *node, const char *origin); - -#endif diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h index dde50a5..9adb9a9 100644 --- a/include/pcmki/pcmki_scheduler.h +++ b/include/pcmki/pcmki_scheduler.h @@ -8,36 +8,37 @@ */ #ifndef PCMK__PCMKI_PCMKI_SCHEDULER__H -# define PCMK__PCMKI_PCMKI_SCHEDULER__H +#define PCMK__PCMKI_PCMKI_SCHEDULER__H -# include -# include -# include -# include -# include -# include +#include // GList +#include // bool +#include // xmlNode -# include +#include // lrmd_event_data_t +#include // pcmk_resource_t, pcmk_scheduler_t typedef struct { const char *id; const char *node_attribute; - pe_resource_t *dependent; // The resource being colocated - pe_resource_t *primary; // The resource the dependent is colocated with + pcmk_resource_t *dependent; // The resource being colocated + pcmk_resource_t *primary; // The resource the dependent is colocated with int dependent_role; // Colocation applies only if dependent has this role int primary_role; // Colocation applies only if primary has this role int score; - bool influence; // Whether dependent influences active primary placement + uint32_t flags; // Group of enum pcmk__coloc_flags } pcmk__colocation_t; -void pcmk__unpack_constraints(pe_working_set_t *data_set); +void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler); void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags, - pe_working_set_t *data_set); + pcmk_scheduler_t *scheduler); -GList *pcmk__with_this_colocations(const pe_resource_t *rsc); -GList *pcmk__this_with_colocations(const pe_resource_t *rsc); +GList *pcmk__copy_node_list(const GList *list, bool reset); + +xmlNode *pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *event, + const char *caller_version, int target_rc, + const char *node, const char *origin); #endif diff --git a/include/pcmki/pcmki_simulate.h b/include/pcmki/pcmki_simulate.h index 0b09903..ab73411 100644 --- a/include/pcmki/pcmki_simulate.h +++ b/include/pcmki/pcmki_simulate.h @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the Pacemaker project contributors + * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,7 +11,7 @@ # define PCMK__PCMKI_PCMKI_SIMULATE__H #include -#include +#include #include #include // cib_t #include @@ -24,28 +24,28 @@ * CIB file in a given directory, printing the profiling timings for * each. * - * \note \p data_set->priv must have been set to a valid \p pcmk__output_t + * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t * object before this function is called. 
* - * \param[in] dir A directory full of CIB files to be profiled - * \param[in] repeat Number of times to run on each input file - * \param[in,out] data_set Working set for the cluster - * \param[in] use_date The date to set the cluster's time to (may be NULL) + * \param[in] dir A directory full of CIB files to be profiled + * \param[in] repeat Number of times to run on each input file + * \param[in,out] scheduler Scheduler data + * \param[in] use_date The date to set the cluster's time to (may be NULL) */ -void pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set, - const char *use_date); +void pcmk__profile_dir(const char *dir, long long repeat, + pcmk_scheduler_t *scheduler, const char *use_date); /*! * \internal * \brief Simulate executing a transition * - * \param[in,out] data_set Cluster working set + * \param[in,out] scheduler Scheduler data * \param[in,out] cib CIB object for scheduler input * \param[in] op_fail_list List of actions to simulate as failing * * \return Transition status after simulated execution */ -enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set, +enum pcmk__graph_status pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib, const GList *op_fail_list); @@ -58,7 +58,7 @@ enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set, * optionally writes out a variety of artifacts to show the results of the * simulation. Output can be modified with various flags. * - * \param[in,out] data_set Working set for the cluster + * \param[in,out] scheduler Scheduler data * \param[in,out] out The output functions structure * \param[in] injections A structure containing cluster events * (node up/down, tickets, injected operations) @@ -80,7 +80,7 @@ enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set, * * \return Standard Pacemaker return code */ -int pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out, +int pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out, const pcmk_injections_t *injections, unsigned int flags, uint32_t section_opts, const char *use_date, const char *input_file, const char *graph_file, diff --git a/include/pcmki/pcmki_status.h b/include/pcmki/pcmki_status.h index 6b48069..01139bb 100644 --- a/include/pcmki/pcmki_status.h +++ b/include/pcmki/pcmki_status.h @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include @@ -26,8 +26,8 @@ extern "C" { * \internal * \brief Print one-line status suitable for use with monitoring software * - * \param[in,out] out Output object - * \param[in] data_set Cluster working set + * \param[in,out] out Output object + * \param[in] scheduler Scheduler data * * \return Standard Pacemaker return code * @@ -39,7 +39,7 @@ extern "C" { * callers should be added. 
*/ int pcmk__output_simple_status(pcmk__output_t *out, - const pe_working_set_t *data_set); + const pcmk_scheduler_t *scheduler); int pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib, xmlNode *current_cib, diff --git a/include/pcmki/pcmki_transition.h b/include/pcmki/pcmki_transition.h index 5dc3101..93237ed 100644 --- a/include/pcmki/pcmki_transition.h +++ b/include/pcmki/pcmki_transition.h @@ -14,6 +14,7 @@ # include # include # include +# include // lrmd_event_data_t #ifdef __cplusplus extern "C" { @@ -164,6 +165,7 @@ void pcmk__free_graph(pcmk__graph_t *graph); const char *pcmk__graph_status2text(enum pcmk__graph_status state); void pcmk__log_graph(unsigned int log_level, pcmk__graph_t *graph); void pcmk__log_graph_action(int log_level, pcmk__graph_action_t *action); +void pcmk__log_transition_summary(const char *filename); lrmd_event_data_t *pcmk__event_from_graph_action(const xmlNode *resource, const pcmk__graph_action_t *action, int status, int rc, diff --git a/include/portability.h b/include/portability.h index 932642d..368f887 100644 --- a/include/portability.h +++ b/include/portability.h @@ -1,5 +1,5 @@ /* - * Copyright 2001-2021 the Pacemaker project contributors + * Copyright 2001-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -27,45 +27,6 @@ # endif # endif -/* Prototypes for libreplace functions */ - -# ifndef HAVE_DAEMON - /* We supply a replacement function, but need a prototype */ -int daemon(int nochdir, int noclose); -# endif - -# ifndef HAVE_SETENV - /* We supply a replacement function, but need a prototype */ -int setenv(const char *name, const char *value, int why); -# endif - -# ifndef HAVE_STRERROR - /* We supply a replacement function, but need a prototype */ -char *strerror(int errnum); -# endif - -# ifndef HAVE_STRCHRNUL - /* We supply a replacement function, but need a prototype */ -char *strchrnul(const char *s, int c_in); -# endif - -# ifndef HAVE_ALPHASORT -# include -int alphasort(const void *dirent1, const void *dirent2); -# endif - -# ifndef HAVE_STRNLEN -size_t strnlen(const char *s, size_t maxlen); -# else -# define USE_GNU -# endif - -# ifndef HAVE_STRNDUP -char *strndup(const char *str, size_t len); -# else -# define USE_GNU -# endif - # if HAVE_DBUS # ifndef HAVE_DBUSBASICVALUE # include diff --git a/lib/Makefile.am b/lib/Makefile.am index ed5bfa3..52cf974 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2003-2021 the Pacemaker project contributors +# Copyright 2003-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -8,7 +8,11 @@ # MAINTAINERCLEANFILES = Makefile.in -LIBS = cib lrmd service fencing cluster +LIBS = cib \ + lrmd \ + service \ + fencing \ + cluster pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \ libpacemaker.pc \ @@ -18,4 +22,12 @@ pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \ EXTRA_DIST = $(pkgconfig_DATA:%=%.in) -SUBDIRS = gnu common pengine cib services fencing lrmd cluster pacemaker +SUBDIRS = gnu \ + common \ + pengine \ + cib \ + services \ + fencing \ + lrmd \ + cluster \ + pacemaker diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am index 721fca1..a74c4b1 100644 --- a/lib/cib/Makefile.am +++ b/lib/cib/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2018 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
# @@ -11,18 +11,20 @@ include $(top_srcdir)/mk/common.mk ## libraries lib_LTLIBRARIES = libcib.la -## SOURCES -libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c -libcib_la_SOURCES += cib_file.c cib_remote.c +## Library sources (*must* use += format for bumplibs) +libcib_la_SOURCES = cib_attrs.c +libcib_la_SOURCES += cib_client.c +libcib_la_SOURCES += cib_file.c +libcib_la_SOURCES += cib_native.c +libcib_la_SOURCES += cib_ops.c +libcib_la_SOURCES += cib_remote.c +libcib_la_SOURCES += cib_utils.c -libcib_la_LDFLAGS = -version-info 31:0:4 +libcib_la_LDFLAGS = -version-info 32:0:5 libcib_la_CPPFLAGS = -I$(top_srcdir) $(AM_CPPFLAGS) libcib_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libcib_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) -libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/common/libcrmcommon.la - -clean-generic: - rm -f *.log *.debug *.xml *~ +libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \ + $(top_builddir)/lib/common/libcrmcommon.la diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c index 5f3a722..11629b8 100644 --- a/lib/cib/cib_attrs.c +++ b/lib/cib/cib_attrs.c @@ -152,16 +152,15 @@ find_attr(cib_t *cib, const char *section, const char *node_uuid, static int handle_multiples(pcmk__output_t *out, xmlNode *search, const char *attr_name) { - if (xml_has_children(search)) { + if ((search != NULL) && (search->children != NULL)) { xmlNode *child = NULL; - out->info(out, "Multiple attributes match name=%s", attr_name); + out->info(out, "Multiple attributes match name=%s", attr_name); for (child = pcmk__xml_first_child(search); child != NULL; child = pcmk__xml_next(child)) { out->info(out, " Value: %s \t(id=%s)", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } - return ENOTUNIQ; } else { @@ -184,9 +183,9 @@ cib__update_node_attr(pcmk__output_t *out, cib_t *cib, int call_options, const c char *local_attr_id = NULL; char *local_set_name = NULL; - CRM_CHECK(section != NULL, return EINVAL); - CRM_CHECK(attr_value != NULL, return EINVAL); - CRM_CHECK(attr_name != NULL || attr_id != NULL, return EINVAL); + CRM_CHECK((out != NULL) && (cib != NULL) && (section != NULL) + && ((attr_id != NULL) || (attr_name != NULL)) + && (attr_value != NULL), return EINVAL); rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id, attr_name, user_name, &xml_search); @@ -360,7 +359,7 @@ cib__get_node_attrs(pcmk__output_t *out, cib_t *cib, const char *section, crm_trace("Query failed for attribute %s (section=%s node=%s set=%s): %s", pcmk__s(attr_name, "with unspecified name"), section, pcmk__s(set_name, ""), - pcmk__s(node_uuid, ""), pcmk_strerror(rc)); + pcmk__s(node_uuid, ""), pcmk_rc_str(rc)); } return rc; @@ -487,7 +486,7 @@ read_attr_delegate(cib_t *cib, const char *section, const char *node_uuid, attr_id, attr_name, user_name, &result); if (rc == pcmk_rc_ok) { - if (!xml_has_children(result)) { + if (result->children == NULL) { pcmk__str_update(attr_value, crm_element_value(result, XML_NVPAIR_ATTR_VALUE)); } else { rc = ENOTUNIQ; @@ -677,9 +676,7 @@ query_node_uname(cib_t * the_cib, const char *uuid, char **uname) } xml_obj = fragment; - CRM_CHECK(pcmk__str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES, pcmk__str_casei), - return -ENOMSG); - CRM_ASSERT(xml_obj != NULL); + CRM_CHECK(pcmk__xe_is(xml_obj, XML_CIB_TAG_NODES), return -ENOMSG); crm_log_xml_trace(xml_obj, "Result section"); rc = -ENXIO; diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c index 2d179e0..32e1f83 100644 --- a/lib/cib/cib_client.c 
+++ b/lib/cib/cib_client.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -253,14 +253,15 @@ cib_client_noop(cib_t * cib, int call_options) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_NOOP, NULL, NULL, NULL, NULL, - call_options, NULL); + call_options, cib->user); } static int cib_client_ping(cib_t * cib, xmlNode ** output_data, int call_options) { op_common(cib); - return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data, call_options, NULL); + return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data, + call_options, cib->user); } static int @@ -275,7 +276,7 @@ cib_client_query_from(cib_t * cib, const char *host, const char *section, { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_QUERY, host, section, NULL, - output_data, call_options, NULL); + output_data, call_options, cib->user); } static int @@ -283,7 +284,7 @@ is_primary(cib_t *cib) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_IS_PRIMARY, NULL, NULL, NULL, - NULL, cib_scope_local|cib_sync_call, NULL); + NULL, cib_scope_local|cib_sync_call, cib->user); } static int @@ -291,7 +292,7 @@ set_secondary(cib_t *cib, int call_options) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_SECONDARY, NULL, NULL, NULL, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -306,7 +307,7 @@ set_primary(cib_t *cib, int call_options) op_common(cib); crm_trace("Adding cib_scope_local to options"); return cib_internal_op(cib, PCMK__CIB_REQUEST_PRIMARY, NULL, NULL, NULL, - NULL, call_options|cib_scope_local, NULL); + NULL, call_options|cib_scope_local, cib->user); } static int @@ -314,7 +315,7 @@ cib_client_bump_epoch(cib_t * cib, int call_options) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_BUMP, NULL, NULL, NULL, NULL, - call_options, NULL); + call_options, cib->user); } static int @@ -322,7 +323,7 @@ cib_client_upgrade(cib_t * cib, int call_options) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_UPGRADE, NULL, NULL, NULL, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -336,7 +337,7 @@ cib_client_sync_from(cib_t * cib, const char *host, const char *section, int cal { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_SYNC_TO_ALL, host, section, - NULL, NULL, call_options, NULL); + NULL, NULL, call_options, cib->user); } static int @@ -344,7 +345,7 @@ cib_client_create(cib_t * cib, const char *section, xmlNode * data, int call_opt { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_CREATE, NULL, section, data, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -352,7 +353,7 @@ cib_client_modify(cib_t * cib, const char *section, xmlNode * data, int call_opt { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_MODIFY, NULL, section, data, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -360,7 +361,7 @@ cib_client_replace(cib_t * cib, const char *section, xmlNode * data, int call_op { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_REPLACE, NULL, section, data, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -368,7 +369,7 @@ cib_client_delete(cib_t * cib, const char *section, xmlNode * data, int call_opt { op_common(cib); return cib_internal_op(cib, 
PCMK__CIB_REQUEST_DELETE, NULL, section, data, - NULL, call_options, NULL); + NULL, call_options, cib->user); } static int @@ -376,7 +377,7 @@ cib_client_delete_absolute(cib_t * cib, const char *section, xmlNode * data, int { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_ABS_DELETE, NULL, section, - data, NULL, call_options, NULL); + data, NULL, call_options, cib->user); } static int @@ -384,7 +385,76 @@ cib_client_erase(cib_t * cib, xmlNode ** output_data, int call_options) { op_common(cib); return cib_internal_op(cib, PCMK__CIB_REQUEST_ERASE, NULL, NULL, NULL, - output_data, call_options, NULL); + output_data, call_options, cib->user); +} + +static int +cib_client_init_transaction(cib_t *cib) +{ + int rc = pcmk_rc_ok; + + op_common(cib); + + if (cib->transaction != NULL) { + // A client can have at most one transaction at a time + rc = pcmk_rc_already; + } + + if (rc == pcmk_rc_ok) { + cib->transaction = create_xml_node(NULL, T_CIB_TRANSACTION); + if (cib->transaction == NULL) { + rc = ENOMEM; + } + } + + if (rc != pcmk_rc_ok) { + const char *client_id = NULL; + + cib->cmds->client_id(cib, NULL, &client_id); + crm_err("Failed to initialize CIB transaction for client %s: %s", + client_id, pcmk_rc_str(rc)); + } + return pcmk_rc2legacy(rc); +} + +static int +cib_client_end_transaction(cib_t *cib, bool commit, int call_options) +{ + const char *client_id = NULL; + int rc = pcmk_ok; + + op_common(cib); + cib->cmds->client_id(cib, NULL, &client_id); + client_id = pcmk__s(client_id, "(unidentified)"); + + if (commit) { + if (cib->transaction == NULL) { + rc = pcmk_rc_no_transaction; + + crm_err("Failed to commit transaction for CIB client %s: %s", + client_id, pcmk_rc_str(rc)); + return pcmk_rc2legacy(rc); + } + rc = cib_internal_op(cib, PCMK__CIB_REQUEST_COMMIT_TRANSACT, NULL, NULL, + cib->transaction, NULL, call_options, cib->user); + + } else { + // Discard always succeeds + if (cib->transaction != NULL) { + crm_trace("Discarded transaction for CIB client %s", client_id); + } else { + crm_trace("No transaction found for CIB client %s", client_id); + } + } + free_xml(cib->transaction); + cib->transaction = NULL; + return rc; +} + +static void +cib_client_set_user(cib_t *cib, const char *user) +{ + pcmk__str_update(&(cib->user), user); } static void @@ -622,13 +692,15 @@ cib_new_variant(void) return NULL; } + // Deprecated method new_cib->cmds->set_op_callback = cib_client_set_op_callback; + new_cib->cmds->add_notify_callback = cib_client_add_notify_callback; new_cib->cmds->del_notify_callback = cib_client_del_notify_callback; new_cib->cmds->register_callback = cib_client_register_callback; new_cib->cmds->register_callback_full = cib_client_register_callback_full; - new_cib->cmds->noop = cib_client_noop; + new_cib->cmds->noop = cib_client_noop; // Deprecated method new_cib->cmds->ping = cib_client_ping; new_cib->cmds->query = cib_client_query; new_cib->cmds->sync = cib_client_sync; @@ -656,8 +728,14 @@ cib_new_variant(void) new_cib->cmds->remove = cib_client_delete; new_cib->cmds->erase = cib_client_erase; + // Deprecated method new_cib->cmds->delete_absolute = cib_client_delete_absolute; + new_cib->cmds->init_transaction = cib_client_init_transaction; + new_cib->cmds->end_transaction = cib_client_end_transaction; + + new_cib->cmds->set_user = cib_client_set_user; + return new_cib; } diff --git a/lib/cib/cib_file.c b/lib/cib/cib_file.c index 7d05965..a279823 100644 --- a/lib/cib/cib_file.c +++ b/lib/cib/cib_file.c @@ -37,35 +37,100 @@ #define CIB_LIVE_NAME CIB_SERIES ".xml" +// 
key: client ID (const char *) -> value: client (cib_t *) +static GHashTable *client_table = NULL; + enum cib_file_flags { cib_file_flag_dirty = (1 << 0), cib_file_flag_live = (1 << 1), }; typedef struct cib_file_opaque_s { - uint32_t flags; // Group of enum cib_file_flags + char *id; char *filename; + uint32_t flags; // Group of enum cib_file_flags + xmlNode *cib_xml; } cib_file_opaque_t; -struct cib_func_entry { - const char *op; - gboolean read_only; - cib_op_t fn; -}; +static int cib_file_process_commit_transaction(const char *op, int options, + const char *section, + xmlNode *req, xmlNode *input, + xmlNode *existing_cib, + xmlNode **result_cib, + xmlNode **answer); -static struct cib_func_entry cib_file_ops[] = { - { PCMK__CIB_REQUEST_QUERY, TRUE, cib_process_query }, - { PCMK__CIB_REQUEST_MODIFY, FALSE, cib_process_modify }, - { PCMK__CIB_REQUEST_APPLY_PATCH, FALSE, cib_process_diff }, - { PCMK__CIB_REQUEST_BUMP, FALSE, cib_process_bump }, - { PCMK__CIB_REQUEST_REPLACE, FALSE, cib_process_replace }, - { PCMK__CIB_REQUEST_CREATE, FALSE, cib_process_create }, - { PCMK__CIB_REQUEST_DELETE, FALSE, cib_process_delete }, - { PCMK__CIB_REQUEST_ERASE, FALSE, cib_process_erase }, - { PCMK__CIB_REQUEST_UPGRADE, FALSE, cib_process_upgrade }, -}; +/*! + * \internal + * \brief Add a CIB file client to client table + * + * \param[in] cib CIB client + */ +static void +register_client(const cib_t *cib) +{ + cib_file_opaque_t *private = cib->variant_opaque; + + if (client_table == NULL) { + client_table = pcmk__strkey_table(NULL, NULL); + } + g_hash_table_insert(client_table, private->id, (gpointer) cib); +} + +/*! + * \internal + * \brief Remove a CIB file client from client table + * + * \param[in] cib CIB client + */ +static void +unregister_client(const cib_t *cib) +{ + cib_file_opaque_t *private = cib->variant_opaque; -static xmlNode *in_mem_cib = NULL; + if (client_table == NULL) { + return; + } + + g_hash_table_remove(client_table, private->id); + + /* @COMPAT: Add to crm_exit() when libcib and libcrmcommon are merged, + * instead of destroying the client table when there are no more clients. + */ + if (g_hash_table_size(client_table) == 0) { + g_hash_table_destroy(client_table); + client_table = NULL; + } +} + +/*! + * \internal + * \brief Look up a CIB file client by its ID + * + * \param[in] client_id CIB client ID + * + * \return CIB client with matching ID if found, or \p NULL otherwise + */ +static cib_t * +get_client(const char *client_id) +{ + if (client_table == NULL) { + return NULL; + } + return g_hash_table_lookup(client_table, (gpointer) client_id); +} + +static const cib__op_fn_t cib_op_functions[] = { + [cib__op_apply_patch] = cib_process_diff, + [cib__op_bump] = cib_process_bump, + [cib__op_commit_transact] = cib_file_process_commit_transaction, + [cib__op_create] = cib_process_create, + [cib__op_delete] = cib_process_delete, + [cib__op_erase] = cib_process_erase, + [cib__op_modify] = cib_process_modify, + [cib__op_query] = cib_process_query, + [cib__op_replace] = cib_process_replace, + [cib__op_upgrade] = cib_process_upgrade, +}; /* cib_file_backup() and cib_file_write_with_digest() need to chown the * written files only in limited circumstances, so these variables allow @@ -93,6 +158,27 @@ static gboolean cib_do_chown = FALSE; #flags_to_clear); \ } while (0) +/*! 
+ * \internal + * \brief Get the function that performs a given CIB file operation + * + * \param[in] operation Operation whose function to look up + * + * \return Function that performs \p operation for a CIB file client + */ +static cib__op_fn_t +file_get_op_function(const cib__operation_t *operation) +{ + enum cib__op_type type = operation->type; + + CRM_ASSERT(type >= 0); + + if (type >= PCMK__NELEM(cib_op_functions)) { + return NULL; + } + return cib_op_functions[type]; +} + /*! * \internal * \brief Check whether a file is the live CIB @@ -125,114 +211,148 @@ cib_file_is_live(const char *filename) } static int -cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host, - const char *section, xmlNode *data, - xmlNode **output_data, int call_options, - const char *user_name) +cib_file_process_request(cib_t *cib, xmlNode *request, xmlNode **output) { int rc = pcmk_ok; - char *effective_user = NULL; - gboolean query = FALSE; - gboolean changed = FALSE; - xmlNode *request = NULL; - xmlNode *output = NULL; - xmlNode *cib_diff = NULL; + const cib__operation_t *operation = NULL; + cib__op_fn_t op_function = NULL; + + int call_id = 0; + int call_options = cib_none; + const char *op = crm_element_value(request, F_CIB_OPERATION); + const char *section = crm_element_value(request, F_CIB_SECTION); + xmlNode *data = get_message_xml(request, F_CIB_CALLDATA); + + bool changed = false; + bool read_only = false; xmlNode *result_cib = NULL; - cib_op_t *fn = NULL; - int lpc = 0; - static int max_msg_types = PCMK__NELEM(cib_file_ops); + xmlNode *cib_diff = NULL; + cib_file_opaque_t *private = cib->variant_opaque; - crm_info("Handling %s operation for %s as %s", - (op? op : "invalid"), (section? section : "entire CIB"), - (user_name? user_name : "default user")); + // We error checked these in callers + cib__get_operation(op, &operation); + op_function = file_get_op_function(operation); - cib__set_call_options(call_options, "file operation", - cib_no_mtime|cib_inhibit_bcast|cib_scope_local); + crm_element_value_int(request, F_CIB_CALLID, &call_id); + crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); - if (cib->state == cib_disconnected) { - return -ENOTCONN; - } + read_only = !pcmk_is_set(operation->flags, cib__op_attr_modifies); - if (output_data != NULL) { - *output_data = NULL; + // Mirror the logic in prepare_input() in pacemaker-based + if ((section != NULL) && pcmk__xe_is(data, XML_TAG_CIB)) { + + data = pcmk_find_cib_element(data, section); } - if (op == NULL) { - return -EINVAL; + rc = cib_perform_op(op, call_options, op_function, read_only, section, + request, data, true, &changed, &private->cib_xml, + &result_cib, &cib_diff, output); + + if (pcmk_is_set(call_options, cib_transaction)) { + /* The rest of the logic applies only to the transaction as a whole, not + * to individual requests. 
+ */ + goto done; } - for (lpc = 0; lpc < max_msg_types; lpc++) { - if (pcmk__str_eq(op, cib_file_ops[lpc].op, pcmk__str_casei)) { - fn = &(cib_file_ops[lpc].fn); - query = cib_file_ops[lpc].read_only; - break; + if (rc == -pcmk_err_schema_validation) { + validate_xml_verbose(result_cib); + + } else if ((rc == pcmk_ok) && !read_only) { + pcmk__log_xml_patchset(LOG_DEBUG, cib_diff); + + if (result_cib != private->cib_xml) { + free_xml(private->cib_xml); + private->cib_xml = result_cib; } + cib_set_file_flags(private, cib_file_flag_dirty); } - if (fn == NULL) { - return -EPROTONOSUPPORT; + // Global operation callback (deprecated) + if (cib->op_callback != NULL) { + cib->op_callback(NULL, call_id, rc, *output); } - cib->call_id++; - request = cib_create_op(cib->call_id, op, host, section, data, call_options, - user_name); - if(user_name) { - crm_xml_add(request, XML_ACL_TAG_USER, user_name); +done: + if ((result_cib != private->cib_xml) && (result_cib != *output)) { + free_xml(result_cib); } + free_xml(cib_diff); + return rc; +} - /* Mirror the logic in cib_prepare_common() */ - if (section != NULL && data != NULL && pcmk__str_eq(crm_element_name(data), XML_TAG_CIB, pcmk__str_none)) { - data = pcmk_find_cib_element(data, section); - } +static int +cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host, + const char *section, xmlNode *data, + xmlNode **output_data, int call_options, + const char *user_name) +{ + int rc = pcmk_ok; + xmlNode *request = NULL; + xmlNode *output = NULL; + cib_file_opaque_t *private = cib->variant_opaque; - rc = cib_perform_op(op, call_options, fn, query, - section, request, data, TRUE, &changed, in_mem_cib, &result_cib, &cib_diff, - &output); + const cib__operation_t *operation = NULL; - free_xml(request); - if (rc == -pcmk_err_schema_validation) { - validate_xml_verbose(result_cib); + crm_info("Handling %s operation for %s as %s", + pcmk__s(op, "invalid"), pcmk__s(section, "entire CIB"), + pcmk__s(user_name, "default user")); + + if (output_data != NULL) { + *output_data = NULL; } - if (rc != pcmk_ok) { - free_xml(result_cib); + if (cib->state == cib_disconnected) { + return -ENOTCONN; + } - } else if (query == FALSE) { - pcmk__output_t *out = NULL; + rc = cib__get_operation(op, &operation); + rc = pcmk_rc2legacy(rc); + if (rc != pcmk_ok) { + // @COMPAT: At compatibility break, use rc directly + return -EPROTONOSUPPORT; + } - rc = pcmk_rc2legacy(pcmk__log_output_new(&out)); - CRM_CHECK(rc == pcmk_ok, goto done); + if (file_get_op_function(operation) == NULL) { + // @COMPAT: At compatibility break, use EOPNOTSUPP + crm_err("Operation %s is not supported by CIB file clients", op); + return -EPROTONOSUPPORT; + } - pcmk__output_set_log_level(out, LOG_DEBUG); - rc = out->message(out, "xml-patchset", cib_diff); - out->finish(out, pcmk_rc2exitc(rc), true, NULL); - pcmk__output_free(out); - rc = pcmk_ok; + cib__set_call_options(call_options, "file operation", cib_no_mtime); - free_xml(in_mem_cib); - in_mem_cib = result_cib; - cib_set_file_flags(private, cib_file_flag_dirty); + rc = cib__create_op(cib, op, host, section, data, call_options, user_name, + NULL, &request); + if (rc != pcmk_ok) { + return rc; } + crm_xml_add(request, XML_ACL_TAG_USER, user_name); + crm_xml_add(request, F_CIB_CLIENTID, private->id); - if (cib->op_callback != NULL) { - cib->op_callback(NULL, cib->call_id, rc, output); + if (pcmk_is_set(call_options, cib_transaction)) { + rc = cib__extend_transaction(cib, request); + goto done; } + rc = cib_file_process_request(cib, request, 
&output); + if ((output_data != NULL) && (output != NULL)) { - *output_data = (output == in_mem_cib)? copy_xml(output) : output; + if (output->doc == private->cib_xml->doc) { + *output_data = copy_xml(output); + } else { + *output_data = output; + } } done: - free_xml(cib_diff); + if ((output != NULL) + && (output->doc != private->cib_xml->doc) + && ((output_data == NULL) || (output != *output_data))) { - if ((output_data == NULL) && (output != in_mem_cib)) { - /* Don't free output if we're still using it. (output_data != NULL) - * means we may have assigned *output_data = output above. - */ free_xml(output); } - free(effective_user); + free_xml(request); return rc; } @@ -240,7 +360,8 @@ done: * \internal * \brief Read CIB from disk and validate it against XML schema * - * \param[in] filename Name of file to read CIB from + * \param[in] filename Name of file to read CIB from + * \param[out] output Where to store the read CIB XML * * \return pcmk_ok on success, * -ENXIO if file does not exist (or stat() otherwise fails), or @@ -251,7 +372,7 @@ done: * because some callers might not need to write. */ static int -load_file_cib(const char *filename) +load_file_cib(const char *filename, xmlNode **output) { struct stat buf; xmlNode *root = NULL; @@ -282,7 +403,7 @@ load_file_cib(const char *filename) } /* Remember the parsed XML for later use */ - in_mem_cib = root; + *output = root; return pcmk_ok; } @@ -295,7 +416,7 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type) if (private->filename == NULL) { rc = -EINVAL; } else { - rc = load_file_cib(private->filename); + rc = load_file_cib(private->filename, &private->cib_xml); } if (rc == pcmk_ok) { @@ -303,10 +424,11 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type) private->filename, name); cib->state = cib_connected_command; cib->type = cib_command; + register_client(cib); } else { - crm_info("Connection to local file '%s' for %s failed: %s\n", - private->filename, name, pcmk_strerror(rc)); + crm_info("Connection to local file '%s' for %s (client %s) failed: %s", + private->filename, name, private->id, pcmk_strerror(rc)); } return rc; } @@ -315,12 +437,13 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type) * \internal * \brief Write out the in-memory CIB to a live CIB file * - * param[in,out] path Full path to file to write + * param[in] cib_root Root of XML tree to write + * param[in,out] path Full path to file to write * * \return 0 on success, -1 on failure */ static int -cib_file_write_live(char *path) +cib_file_write_live(xmlNode *cib_root, char *path) { uid_t uid = geteuid(); struct passwd *daemon_pwent; @@ -370,7 +493,7 @@ cib_file_write_live(char *path) } /* write the file */ - if (cib_file_write_with_digest(in_mem_cib, cib_dirname, + if (cib_file_write_with_digest(cib_root, cib_dirname, cib_filename) != pcmk_ok) { rc = -1; } @@ -410,13 +533,15 @@ cib_file_signoff(cib_t *cib) crm_debug("Disconnecting from the CIB manager"); cib->state = cib_disconnected; cib->type = cib_no_connection; + unregister_client(cib); + cib->cmds->end_transaction(cib, false, cib_none); /* If the in-memory CIB has been changed, write it to disk */ if (pcmk_is_set(private->flags, cib_file_flag_dirty)) { /* If this is the live CIB, write it out with a digest */ if (pcmk_is_set(private->flags, cib_file_flag_live)) { - if (cib_file_write_live(private->filename) < 0) { + if (cib_file_write_live(private->cib_xml, private->filename) < 0) { rc = pcmk_err_generic; } @@ -424,7 +549,8 @@ 
cib_file_signoff(cib_t *cib) } else { gboolean do_bzip = pcmk__ends_with_ext(private->filename, ".bz2"); - if (write_xml_file(in_mem_cib, private->filename, do_bzip) <= 0) { + if (write_xml_file(private->cib_xml, private->filename, + do_bzip) <= 0) { rc = pcmk_err_generic; } } @@ -438,8 +564,8 @@ cib_file_signoff(cib_t *cib) } /* Free the in-memory CIB */ - free_xml(in_mem_cib); - in_mem_cib = NULL; + free_xml(private->cib_xml); + private->cib_xml = NULL; return rc; } @@ -455,9 +581,11 @@ cib_file_free(cib_t *cib) if (rc == pcmk_ok) { cib_file_opaque_t *private = cib->variant_opaque; + free(private->id); free(private->filename); - free(cib->cmds); free(private); + free(cib->cmds); + free(cib->user); free(cib); } else { @@ -494,24 +622,24 @@ cib_file_set_connection_dnotify(cib_t *cib, * \param[out] async_id If not \p NULL, where to store asynchronous client ID * \param[out] sync_id If not \p NULL, where to store synchronous client ID * - * \return Legacy Pacemaker return code (specifically, \p -EPROTONOSUPPORT) + * \return Legacy Pacemaker return code * * \note This is the \p cib_file variant implementation of * \p cib_api_operations_t:client_id(). - * \note A \p cib_file object doesn't connect to the CIB and is never assigned a - * client ID. */ static int cib_file_client_id(const cib_t *cib, const char **async_id, const char **sync_id) { + cib_file_opaque_t *private = cib->variant_opaque; + if (async_id != NULL) { - *async_id = NULL; + *async_id = private->id; } if (sync_id != NULL) { - *sync_id = NULL; + *sync_id = private->id; } - return -EPROTONOSUPPORT; + return pcmk_ok; } cib_t * @@ -530,6 +658,7 @@ cib_file_new(const char *cib_location) free(cib); return NULL; } + private->id = crm_generate_uuid(); cib->variant = cib_file; cib->variant_opaque = private; @@ -550,7 +679,7 @@ cib_file_new(const char *cib_location) cib->cmds->signon = cib_file_signon; cib->cmds->signoff = cib_file_signoff; cib->cmds->free = cib_file_free; - cib->cmds->inputfd = cib_file_inputfd; + cib->cmds->inputfd = cib_file_inputfd; // Deprecated method cib->cmds->register_notification = cib_file_register_notification; cib->cmds->set_connection_dnotify = cib_file_set_connection_dnotify; @@ -917,3 +1046,133 @@ cib_file_write_with_digest(xmlNode *cib_root, const char *cib_dirname, free(tmp_cib); return exit_rc; } + +/*! + * \internal + * \brief Process requests in a CIB transaction + * + * Stop when a request fails or when all requests have been processed. + * + * \param[in,out] cib CIB client + * \param[in,out] transaction CIB transaction + * + * \return Standard Pacemaker return code + */ +static int +cib_file_process_transaction_requests(cib_t *cib, xmlNode *transaction) +{ + cib_file_opaque_t *private = cib->variant_opaque; + + for (xmlNode *request = first_named_child(transaction, T_CIB_COMMAND); + request != NULL; request = crm_next_same_xml(request)) { + + xmlNode *output = NULL; + const char *op = crm_element_value(request, F_CIB_OPERATION); + + int rc = cib_file_process_request(cib, request, &output); + + rc = pcmk_legacy2rc(rc); + if (rc != pcmk_rc_ok) { + crm_err("Aborting transaction for CIB file client (%s) on file " + "'%s' due to failed %s request: %s", + private->id, private->filename, op, pcmk_rc_str(rc)); + crm_log_xml_info(request, "Failed request"); + return rc; + } + + crm_trace("Applied %s request to transaction working CIB for CIB file " + "client (%s) on file '%s'", + op, private->id, private->filename); + crm_log_xml_trace(request, "Successful request"); + } + + return pcmk_rc_ok; +} + +/*! 
+ * \internal + * \brief Commit a given CIB file client's transaction to a working CIB copy + * + * \param[in,out] cib CIB file client + * \param[in] transaction CIB transaction + * \param[in,out] result_cib Where to store result CIB + * + * \return Standard Pacemaker return code + * + * \note The caller is responsible for replacing the \p cib argument's + * \p private->cib_xml with \p result_cib on success, and for freeing + * \p result_cib using \p free_xml() on failure. + */ +static int +cib_file_commit_transaction(cib_t *cib, xmlNode *transaction, + xmlNode **result_cib) +{ + int rc = pcmk_rc_ok; + cib_file_opaque_t *private = cib->variant_opaque; + xmlNode *saved_cib = private->cib_xml; + + CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION), + return pcmk_rc_no_transaction); + + /* *result_cib should be a copy of private->cib_xml (created by + * cib_perform_op()). If not, make a copy now. Change tracking isn't + * strictly required here because: + * * Each request in the transaction will have changes tracked and ACLs + * checked if appropriate. + * * cib_perform_op() will infer changes for the commit request at the end. + */ + CRM_CHECK((*result_cib != NULL) && (*result_cib != private->cib_xml), + *result_cib = copy_xml(private->cib_xml)); + + crm_trace("Committing transaction for CIB file client (%s) on file '%s' to " + "working CIB", + private->id, private->filename); + + // Apply all changes to a working copy of the CIB + private->cib_xml = *result_cib; + + rc = cib_file_process_transaction_requests(cib, transaction); + + crm_trace("Transaction commit %s for CIB file client (%s) on file '%s'", + ((rc == pcmk_rc_ok)? "succeeded" : "failed"), + private->id, private->filename); + + /* Some request types (for example, erase) may have freed private->cib_xml + * (the working copy) and pointed it at a new XML object. In that case, it + * follows that *result_cib (the working copy) was freed. + * + * Point *result_cib at the updated working copy stored in private->cib_xml. 
+ */ + *result_cib = private->cib_xml; + + // Point private->cib_xml back to the unchanged original copy + private->cib_xml = saved_cib; + + return rc; +} + +static int +cib_file_process_commit_transaction(const char *op, int options, + const char *section, xmlNode *req, + xmlNode *input, xmlNode *existing_cib, + xmlNode **result_cib, xmlNode **answer) +{ + int rc = pcmk_rc_ok; + const char *client_id = crm_element_value(req, F_CIB_CLIENTID); + cib_t *cib = NULL; + + CRM_CHECK(client_id != NULL, return -EINVAL); + + cib = get_client(client_id); + CRM_CHECK(cib != NULL, return -EINVAL); + + rc = cib_file_commit_transaction(cib, input, result_cib); + if (rc != pcmk_rc_ok) { + cib_file_opaque_t *private = cib->variant_opaque; + + crm_err("Could not commit transaction for CIB file client (%s) on " + "file '%s': %s", + private->id, private->filename, pcmk_rc_str(rc)); + } + return pcmk_rc2legacy(rc); +} diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c index 4a87f56..c5e8b9e 100644 --- a/lib/cib/cib_native.c +++ b/lib/cib/cib_native.c @@ -69,20 +69,19 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host, pcmk__set_ipc_flags(ipc_flags, "client", crm_ipc_client_response); } - cib->call_id++; - if (cib->call_id < 1) { - cib->call_id = 1; + rc = cib__create_op(cib, op, host, section, data, call_options, user_name, + NULL, &op_msg); + if (rc != pcmk_ok) { + return rc; } - op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options, - user_name); - if (op_msg == NULL) { - return -EPROTO; + if (pcmk_is_set(call_options, cib_transaction)) { + rc = cib__extend_transaction(cib, op_msg); + goto done; } crm_trace("Sending %s message to the CIB manager (timeout=%ds)", op, cib->call_timeout); rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply); - free_xml(op_msg); if (rc < 0) { crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op, @@ -168,6 +167,7 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host, cib->state = cib_disconnected; } + free_xml(op_msg); free_xml(op_reply); return rc; } @@ -255,6 +255,7 @@ cib_native_signoff(cib_t *cib) crm_ipc_destroy(ipc); } + cib->cmds->end_transaction(cib, false, cib_none); cib->state = cib_disconnected; cib->type = cib_no_connection; @@ -268,6 +269,7 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type, int rc = pcmk_ok; const char *channel = NULL; cib_native_opaque_t *native = cib->variant_opaque; + xmlNode *hello = NULL; struct ipc_client_callbacks cib_callbacks = { .dispatch = cib_native_dispatch_internal, @@ -296,12 +298,16 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type, if (async_fd != NULL) { native->ipc = crm_ipc_new(channel, 0); - - if (native->ipc && crm_ipc_connect(native->ipc)) { - *async_fd = crm_ipc_get_fd(native->ipc); - - } else if (native->ipc) { - rc = -ENOTCONN; + if (native->ipc != NULL) { + rc = pcmk__connect_generic_ipc(native->ipc); + if (rc == pcmk_rc_ok) { + rc = pcmk__ipc_fd(native->ipc, async_fd); + if (rc != pcmk_rc_ok) { + crm_info("Couldn't get file descriptor for %s IPC", + channel); + } + } + rc = pcmk_rc2legacy(rc); } } else { @@ -317,23 +323,23 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type, } if (rc == pcmk_ok) { - xmlNode *reply = NULL; - xmlNode *hello = create_xml_node(NULL, "cib_command"); + rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL, + cib_sync_call, NULL, name, &hello); + } - crm_xml_add(hello, F_TYPE, 
T_CIB); - crm_xml_add(hello, F_CIB_OPERATION, CRM_OP_REGISTER); - crm_xml_add(hello, F_CIB_CLIENTNAME, name); - crm_xml_add_int(hello, F_CIB_CALLOPTS, cib_sync_call); + if (rc == pcmk_ok) { + xmlNode *reply = NULL; - if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) { + if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, + &reply) > 0) { const char *msg_type = crm_element_value(reply, F_CIB_OPERATION); - rc = pcmk_ok; crm_log_xml_trace(reply, "reg-reply"); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { - crm_info("Reply to CIB registration message has " - "unknown type '%s'", msg_type); + crm_info("Reply to CIB registration message has unknown type " + "'%s'", + msg_type); rc = -EPROTO; } else { @@ -347,7 +353,6 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type, } else { rc = -ECOMM; } - free_xml(hello); } @@ -383,6 +388,7 @@ cib_native_free(cib_t *cib) free(native->token); free(cib->variant_opaque); free(cib->cmds); + free(cib->user); free(cib); } diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c index d3293c4..c324304 100644 --- a/lib/cib/cib_ops.c +++ b/lib/cib/cib_ops.c @@ -19,6 +19,9 @@ #include #include +#include +#include + #include #include #include @@ -26,6 +29,139 @@ #include #include +// @TODO: Free this via crm_exit() when libcib gets merged with libcrmcommon +static GHashTable *operation_table = NULL; + +static const cib__operation_t cib_ops[] = { + { + PCMK__CIB_REQUEST_ABS_DELETE, cib__op_abs_delete, + cib__op_attr_modifies|cib__op_attr_privileged + }, + { + PCMK__CIB_REQUEST_APPLY_PATCH, cib__op_apply_patch, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_BUMP, cib__op_bump, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_COMMIT_TRANSACT, cib__op_commit_transact, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_replaces + |cib__op_attr_writes_through + }, + { + PCMK__CIB_REQUEST_CREATE, cib__op_create, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_DELETE, cib__op_delete, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_ERASE, cib__op_erase, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_replaces + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_IS_PRIMARY, cib__op_is_primary, + cib__op_attr_privileged + }, + { + PCMK__CIB_REQUEST_MODIFY, cib__op_modify, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_NOOP, cib__op_noop, cib__op_attr_none + }, + { + CRM_OP_PING, cib__op_ping, cib__op_attr_none + }, + { + // @COMPAT: Drop cib__op_attr_modifies when we drop legacy mode support + PCMK__CIB_REQUEST_PRIMARY, cib__op_primary, + cib__op_attr_modifies|cib__op_attr_privileged|cib__op_attr_local + }, + { + PCMK__CIB_REQUEST_QUERY, cib__op_query, cib__op_attr_none + }, + { + PCMK__CIB_REQUEST_REPLACE, cib__op_replace, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_replaces + |cib__op_attr_writes_through + |cib__op_attr_transaction + }, + { + PCMK__CIB_REQUEST_SECONDARY, cib__op_secondary, + cib__op_attr_privileged|cib__op_attr_local + }, + { + PCMK__CIB_REQUEST_SHUTDOWN, cib__op_shutdown, cib__op_attr_privileged + }, + { + PCMK__CIB_REQUEST_SYNC_TO_ALL, cib__op_sync_all, cib__op_attr_privileged + }, + { + PCMK__CIB_REQUEST_SYNC_TO_ONE, 
cib__op_sync_one, cib__op_attr_privileged + }, + { + PCMK__CIB_REQUEST_UPGRADE, cib__op_upgrade, + cib__op_attr_modifies + |cib__op_attr_privileged + |cib__op_attr_writes_through + |cib__op_attr_transaction + }, +}; + +/*! + * \internal + * \brief Get the \c cib__operation_t object for a given CIB operation name + * + * \param[in] op CIB operation name + * \param[out] operation Where to store CIB operation object + * + * \return Standard Pacemaker return code + */ +int +cib__get_operation(const char *op, const cib__operation_t **operation) +{ + CRM_ASSERT((op != NULL) && (operation != NULL)); + + if (operation_table == NULL) { + operation_table = pcmk__strkey_table(NULL, NULL); + + for (int lpc = 0; lpc < PCMK__NELEM(cib_ops); lpc++) { + const cib__operation_t *oper = &(cib_ops[lpc]); + + g_hash_table_insert(operation_table, (gpointer) oper->name, + (gpointer) oper); + } + } + + *operation = g_hash_table_lookup(operation_table, op); + if (*operation == NULL) { + crm_err("Operation %s is invalid", op); + return EINVAL; + } + return pcmk_rc_ok; +} + int cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) @@ -54,8 +190,8 @@ cib_process_query(const char *op, int options, const char *section, xmlNode * re result = -ENXIO; } else if (options & cib_no_children) { - const char *tag = TYPE(obj_root); - xmlNode *shallow = create_xml_node(*answer, tag); + xmlNode *shallow = create_xml_node(*answer, + (const char *) obj_root->name); copy_in_properties(shallow, obj_root); *answer = shallow; @@ -107,12 +243,14 @@ cib_process_erase(const char *op, int options, const char *section, xmlNode * re int result = pcmk_ok; crm_trace("Processing \"%s\" event", op); - *answer = NULL; - free_xml(*result_cib); - *result_cib = createEmptyCib(0); + if (*result_cib != existing_cib) { + free_xml(*result_cib); + } + *result_cib = createEmptyCib(0); copy_in_properties(*result_cib, existing_cib); update_counter(*result_cib, XML_ATTR_GENERATION_ADMIN, false); + *answer = NULL; return result; } @@ -172,7 +310,6 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode * xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { - const char *tag = NULL; int result = pcmk_ok; crm_trace("Processing %s for %s section", @@ -189,16 +326,14 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode * return -EINVAL; } - tag = crm_element_name(input); - if (pcmk__str_eq(XML_CIB_TAG_SECTION_ALL, section, pcmk__str_casei)) { section = NULL; - } else if (pcmk__str_eq(tag, section, pcmk__str_casei)) { + } else if (pcmk__xe_is(input, section)) { section = NULL; } - if (pcmk__str_eq(tag, XML_TAG_CIB, pcmk__str_casei)) { + if (pcmk__xe_is(input, XML_TAG_CIB)) { int updates = 0; int epoch = 0; int admin_epoch = 0; @@ -262,7 +397,9 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode * replace_admin_epoch, replace_epoch, replace_updates, peer); } - free_xml(*result_cib); + if (*result_cib != existing_cib) { + free_xml(*result_cib); + } *result_cib = copy_xml(input); } else { @@ -299,7 +436,7 @@ cib_process_delete(const char *op, int options, const char *section, xmlNode * r } obj_root = pcmk_find_cib_element(*result_cib, section); - if(pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) { + if (pcmk__xe_is(input, section)) { xmlNode *child = NULL; for (child = pcmk__xml_first_child(input); child; child = 
pcmk__xml_next(child)) { @@ -360,7 +497,8 @@ cib_process_modify(const char *op, int options, const char *section, xmlNode * r } } - if(options & cib_mixed_update) { + // @COMPAT cib_mixed_update is deprecated as of 2.1.7 + if (pcmk_is_set(options, cib_mixed_update)) { int max = 0, lpc; xmlXPathObjectPtr xpathObj = xpath_search(*result_cib, "//@__delete__"); @@ -396,7 +534,7 @@ update_cib_object(xmlNode * parent, xmlNode * update) CRM_CHECK(update != NULL, return -EINVAL); CRM_CHECK(parent != NULL, return -EINVAL); - object_name = crm_element_name(update); + object_name = (const char *) update->name; CRM_CHECK(object_name != NULL, return -EINVAL); object_id = ID(update); @@ -425,33 +563,25 @@ update_cib_object(xmlNode * parent, xmlNode * update) // @COMPAT: XML_CIB_ATTR_REPLACE is unused internally. Remove at break. replace = crm_element_value(update, XML_CIB_ATTR_REPLACE); if (replace != NULL) { - xmlNode *remove = NULL; - int last = 0, lpc = 0, len = 0; + int last = 0; + int len = strlen(replace); - len = strlen(replace); - while (lpc <= len) { + for (int lpc = 0; lpc <= len; ++lpc) { if (replace[lpc] == ',' || replace[lpc] == 0) { - char *replace_item = NULL; - - if (last == lpc) { - /* nothing to do */ - last = lpc + 1; - goto incr; - } - - replace_item = strndup(replace + last, lpc - last); - remove = find_xml_node(target, replace_item, FALSE); - if (remove != NULL) { - crm_trace("Replacing node <%s> in <%s>", - replace_item, crm_element_name(target)); - free_xml(remove); - remove = NULL; + if (last != lpc) { + char *replace_item = strndup(replace + last, lpc - last); + xmlNode *remove = find_xml_node(target, replace_item, + FALSE); + + if (remove != NULL) { + crm_trace("Replacing node <%s> in <%s>", + replace_item, target->name); + free_xml(remove); + } + free(replace_item); } - free(replace_item); last = lpc + 1; } - incr: - lpc++; } xml_remove_prop(update, XML_CIB_ATTR_REPLACE); xml_remove_prop(target, XML_CIB_ATTR_REPLACE); @@ -475,7 +605,7 @@ update_cib_object(xmlNode * parent, xmlNode * update) a_child = pcmk__xml_next(a_child)) { int tmp_result = 0; - crm_trace("Updating child <%s%s%s%s>", crm_element_name(a_child), + crm_trace("Updating child <%s%s%s%s>", a_child->name, ((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"), pcmk__s(ID(a_child), ""), ((ID(a_child) == NULL)? "" : "'")); @@ -484,7 +614,7 @@ update_cib_object(xmlNode * parent, xmlNode * update) /* only the first error is likely to be interesting */ if (tmp_result != pcmk_ok) { crm_err("Error updating child <%s%s%s%s>", - crm_element_name(a_child), + a_child->name, ((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"), pcmk__s(ID(a_child), ""), ((ID(a_child) == NULL)? 
"" : "'")); @@ -514,7 +644,7 @@ add_cib_object(xmlNode * parent, xmlNode * new_obj) return -EINVAL; } - object_name = crm_element_name(new_obj); + object_name = (const char *) new_obj->name; if (object_name == NULL) { return -EINVAL; } @@ -555,7 +685,8 @@ update_results(xmlNode *failed, xmlNode *target, const char *operation, add_node_copy(xml_node, target); crm_xml_add(xml_node, XML_FAILCIB_ATTR_ID, ID(target)); - crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE, TYPE(target)); + crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE, + (const char *) target->name); crm_xml_add(xml_node, XML_FAILCIB_ATTR_OP, operation); crm_xml_add(xml_node, XML_FAILCIB_ATTR_REASON, error_msg); @@ -582,7 +713,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r } else if (pcmk__str_eq(XML_TAG_CIB, section, pcmk__str_casei)) { section = NULL; - } else if (pcmk__str_eq(crm_element_name(input), XML_TAG_CIB, pcmk__str_casei)) { + } else if (pcmk__xe_is(input, XML_TAG_CIB)) { section = NULL; } @@ -601,7 +732,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r failed = create_xml_node(NULL, XML_TAG_FAILED); update_section = pcmk_find_cib_element(*result_cib, section); - if (pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) { + if (pcmk__xe_is(input, section)) { xmlNode *a_child = NULL; for (a_child = pcmk__xml_first_child(input); a_child != NULL; @@ -617,7 +748,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r update_results(failed, input, op, result); } - if ((result == pcmk_ok) && xml_has_children(failed)) { + if ((result == pcmk_ok) && (failed->children != NULL)) { result = -EINVAL; } @@ -646,8 +777,11 @@ cib_process_diff(const char *op, int options, const char *section, xmlNode * req op, originator, (pcmk_is_set(options, cib_force_diff)? 
" (global update)" : "")); - free_xml(*result_cib); + if (*result_cib != existing_cib) { + free_xml(*result_cib); + } *result_cib = copy_xml(existing_cib); + return xml_apply_patchset(*result_cib, input, TRUE); } @@ -670,7 +804,7 @@ cib__config_changed_v1(xmlNode *last, xmlNode *next, xmlNode **diff) goto done; } - crm_element_value_int(*diff, "format", &format); + crm_element_value_int(*diff, PCMK_XA_FORMAT, &format); CRM_LOG_ASSERT(format == 1); xpathObj = xpath_search(*diff, "//" XML_CIB_TAG_CONFIGURATION); @@ -803,8 +937,8 @@ cib_process_xpath(const char *op, int options, const char *section, } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) { if (options & cib_no_children) { - const char *tag = TYPE(match); - xmlNode *shallow = create_xml_node(*answer, tag); + xmlNode *shallow = create_xml_node(*answer, + (const char *) match->name); copy_in_properties(shallow, match); diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c index 28095b3..77479d7 100644 --- a/lib/cib/cib_remote.c +++ b/lib/cib/cib_remote.c @@ -55,7 +55,8 @@ typedef struct cib_remote_opaque_s { static int cib_remote_perform_op(cib_t *cib, const char *op, const char *host, const char *section, xmlNode *data, - xmlNode **output_data, int call_options, const char *name) + xmlNode **output_data, int call_options, + const char *user_name) { int rc; int remaining_time = 0; @@ -79,15 +80,16 @@ cib_remote_perform_op(cib_t *cib, const char *op, const char *host, return -EINVAL; } - cib->call_id++; - if (cib->call_id < 1) { - cib->call_id = 1; + rc = cib__create_op(cib, op, host, section, data, call_options, user_name, + NULL, &op_msg); + if (rc != pcmk_ok) { + return rc; } - op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options, - NULL); - if (op_msg == NULL) { - return -EPROTO; + if (pcmk_is_set(call_options, cib_transaction)) { + rc = cib__extend_transaction(cib, op_msg); + free_xml(op_msg); + return rc; } crm_trace("Sending %s message to the CIB manager", op); @@ -378,7 +380,7 @@ cib_tls_signon(cib_t *cib, pcmk__remote_t *connection, gboolean event_channel) } /* login to server */ - login = create_xml_node(NULL, "cib_command"); + login = create_xml_node(NULL, T_CIB_COMMAND); crm_xml_add(login, "op", "authenticate"); crm_xml_add(login, "user", private->user); crm_xml_add(login, "password", private->passwd); @@ -434,6 +436,7 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type) { int rc = pcmk_ok; cib_remote_opaque_t *private = cib->variant_opaque; + xmlNode *hello = NULL; if (private->passwd == NULL) { if (private->out == NULL) { @@ -459,10 +462,13 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type) } if (rc == pcmk_ok) { - xmlNode *hello = cib_create_op(0, CRM_OP_REGISTER, NULL, NULL, NULL, 0, - NULL); - crm_xml_add(hello, F_CIB_CLIENTNAME, name); - pcmk__remote_send_xml(&private->command, hello); + rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL, cib_none, + NULL, name, &hello); + } + + if (rc == pcmk_ok) { + rc = pcmk__remote_send_xml(&private->command, hello); + rc = pcmk_rc2legacy(rc); free_xml(hello); } @@ -490,6 +496,7 @@ cib_remote_signoff(cib_t *cib) cib_tls_close(cib); #endif + cib->cmds->end_transaction(cib, false, cib_none); cib->state = cib_disconnected; cib->type = cib_no_connection; @@ -511,6 +518,7 @@ cib_remote_free(cib_t *cib) free(private->user); free(private->passwd); free(cib->cmds); + free(cib->user); free(private); free(cib); } @@ -530,7 +538,7 @@ cib_remote_inputfd(cib_t * cib) static int 
cib_remote_register_notification(cib_t * cib, const char *callback, int enabled) { - xmlNode *notify_msg = create_xml_node(NULL, "cib_command"); + xmlNode *notify_msg = create_xml_node(NULL, T_CIB_COMMAND); cib_remote_opaque_t *private = cib->variant_opaque; crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY); @@ -614,7 +622,7 @@ cib_remote_new(const char *server, const char *user, const char *passwd, int por cib->cmds->signon = cib_remote_signon; cib->cmds->signoff = cib_remote_signoff; cib->cmds->free = cib_remote_free; - cib->cmds->inputfd = cib_remote_inputfd; + cib->cmds->inputfd = cib_remote_inputfd; // Deprecated method cib->cmds->register_notification = cib_remote_register_notification; cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify; diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c index c75d844..0082eef 100644 --- a/lib/cib/cib_utils.c +++ b/lib/cib/cib_utils.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -77,6 +78,154 @@ cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *upda return TRUE; } +/*! + * \internal + * \brief Get the XML patchset from a CIB diff notification + * + * \param[in] msg CIB diff notification + * \param[out] patchset Where to store XML patchset + * + * \return Standard Pacemaker return code + */ +int +cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset) +{ + int rc = pcmk_err_generic; + + CRM_ASSERT(patchset != NULL); + *patchset = NULL; + + if (msg == NULL) { + crm_err("CIB diff notification received with no XML"); + return ENOMSG; + } + + if ((crm_element_value_int(msg, F_CIB_RC, &rc) != 0) || (rc != pcmk_ok)) { + crm_warn("Ignore failed CIB update: %s " CRM_XS " rc=%d", + pcmk_strerror(rc), rc); + crm_log_xml_debug(msg, "failed"); + return pcmk_legacy2rc(rc); + } + + *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + + if (*patchset == NULL) { + crm_err("CIB diff notification received with no patchset"); + return ENOMSG; + } + return pcmk_rc_ok; +} + +#define XPATH_DIFF_V1 "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED + +/*! + * \internal + * \brief Check whether a given CIB element was modified in a CIB patchset (v1) + * + * \param[in] patchset CIB XML patchset + * \param[in] element XML tag of CIB element to check (\c NULL is equivalent + * to \c XML_TAG_CIB) + * + * \return \c true if \p element was modified, or \c false otherwise + */ +static bool +element_in_patchset_v1(const xmlNode *patchset, const char *element) +{ + char *xpath = crm_strdup_printf(XPATH_DIFF_V1 "//%s", + pcmk__s(element, XML_TAG_CIB)); + xmlXPathObject *xpath_obj = xpath_search(patchset, xpath); + + free(xpath); + + if (xpath_obj == NULL) { + return false; + } + freeXpathObject(xpath_obj); + return true; +} + +/*! + * \internal + * \brief Check whether a given CIB element was modified in a CIB patchset (v2) + * + * \param[in] patchset CIB XML patchset + * \param[in] element XML tag of CIB element to check (\c NULL is equivalent + * to \c XML_TAG_CIB). Supported values include any CIB + * element supported by \c pcmk__cib_abs_xpath_for(). 
+ * + * \return \c true if \p element was modified, or \c false otherwise + */ +static bool +element_in_patchset_v2(const xmlNode *patchset, const char *element) +{ + const char *element_xpath = pcmk__cib_abs_xpath_for(element); + const char *parent_xpath = pcmk_cib_parent_name_for(element); + char *element_regex = NULL; + bool rc = false; + + CRM_CHECK(element_xpath != NULL, return false); // Unsupported element + + // Matches if and only if element_xpath is part of a changed path + element_regex = crm_strdup_printf("^%s(/|$)", element_xpath); + + for (const xmlNode *change = first_named_child(patchset, XML_DIFF_CHANGE); + change != NULL; change = crm_next_same_xml(change)) { + + const char *op = crm_element_value(change, F_CIB_OPERATION); + const char *diff_xpath = crm_element_value(change, XML_DIFF_PATH); + + if (pcmk__str_eq(diff_xpath, element_regex, pcmk__str_regex)) { + // Change to an existing element + rc = true; + break; + } + + if (pcmk__str_eq(op, "create", pcmk__str_none) + && pcmk__str_eq(diff_xpath, parent_xpath, pcmk__str_none) + && pcmk__xe_is(pcmk__xml_first_child(change), element)) { + + // Newly added element + rc = true; + break; + } + } + + free(element_regex); + return rc; +} + +/*! + * \internal + * \brief Check whether a given CIB element was modified in a CIB patchset + * + * \param[in] patchset CIB XML patchset + * \param[in] element XML tag of CIB element to check (\c NULL is equivalent + * to \c XML_TAG_CIB). Supported values include any CIB + * element supported by \c pcmk__cib_abs_xpath_for(). + * + * \return \c true if \p element was modified, or \c false otherwise + */ +bool +cib__element_in_patchset(const xmlNode *patchset, const char *element) +{ + int format = 1; + + CRM_ASSERT(patchset != NULL); + + crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); + switch (format) { + case 1: + return element_in_patchset_v1(patchset, element); + + case 2: + return element_in_patchset_v2(patchset, element); + + default: + crm_warn("Unknown patch format: %d", format); + return false; + } +} + /*! * \brief Create XML for a new (empty) CIB * @@ -141,30 +290,79 @@ cib_acl_enabled(xmlNode *xml, const char *user) return rc; } +/*! + * \internal + * \brief Determine whether to perform operations on a scratch copy of the CIB + * + * \param[in] op CIB operation + * \param[in] section CIB section + * \param[in] call_options CIB call options + * + * \return \p true if we should make a copy of the CIB, or \p false otherwise + */ +static bool +should_copy_cib(const char *op, const char *section, int call_options) +{ + if (pcmk_is_set(call_options, cib_dryrun)) { + // cib_dryrun implies a scratch copy by definition; no side effects + return true; + } + + if (pcmk__str_eq(op, PCMK__CIB_REQUEST_COMMIT_TRANSACT, pcmk__str_none)) { + /* Commit-transaction must make a copy for atomicity. We must revert to + * the original CIB if the entire transaction cannot be applied + * successfully. + */ + return true; + } + + if (pcmk_is_set(call_options, cib_transaction)) { + /* If cib_transaction is set, then we're in the process of committing a + * transaction. The commit-transaction request already made a scratch + * copy, and we're accumulating changes in that copy. + */ + return false; + } + + if (pcmk__str_eq(section, XML_CIB_TAG_STATUS, pcmk__str_none)) { + /* Copying large CIBs accounts for a huge percentage of our CIB usage, + * and this avoids some of it. + * + * @TODO: Is this safe? See discussion at + * https://github.com/ClusterLabs/pacemaker/pull/3094#discussion_r1211400690. 
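
The cib__get_notify_patchset() and cib__element_in_patchset() helpers above give CIB clients one way to answer "did this diff touch section X?", replacing ad-hoc checks such as the pcmk__alert_in_patchset() removed from lib/common/alerts.c later in this patch. A minimal caller-side sketch, assuming the declarations live in the internal CIB header and that the function is registered as a CIB diff-notification callback:

    #include <crm/crm.h>
    #include <crm/msg_xml.h>
    #include <crm/cib/internal.h>   // assumed location of the cib__* helpers

    static void
    example_diff_callback(const char *event, xmlNode *msg)
    {
        const xmlNode *patchset = NULL;

        // Bail out on failed or empty updates; the helper logs the details
        if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
            return;
        }

        // React only if the alerts section (or an ancestor) changed
        if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) {
            crm_debug("Alert configuration changed in %s notification", event);
            // ... re-read alert configuration here ...
        }
    }
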
+ */ + return false; + } + + // Default behavior is to operate on a scratch copy + return true; +} + int -cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query, - const char *section, xmlNode * req, xmlNode * input, - gboolean manage_counters, gboolean * config_changed, - xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff, xmlNode ** output) +cib_perform_op(const char *op, int call_options, cib__op_fn_t fn, bool is_query, + const char *section, xmlNode *req, xmlNode *input, + bool manage_counters, bool *config_changed, + xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff, + xmlNode **output) { int rc = pcmk_ok; - gboolean check_schema = TRUE; + bool check_schema = true; + bool make_copy = true; xmlNode *top = NULL; xmlNode *scratch = NULL; + xmlNode *patchset_cib = NULL; xmlNode *local_diff = NULL; const char *new_version = NULL; const char *user = crm_element_value(req, F_CIB_USER); - bool with_digest = FALSE; - - pcmk__output_t *out = NULL; - int out_rc = pcmk_rc_no_output; + bool with_digest = false; crm_trace("Begin %s%s%s op", (pcmk_is_set(call_options, cib_dryrun)? "dry run of " : ""), (is_query? "read-only " : ""), op); CRM_CHECK(output != NULL, return -ENOMSG); + CRM_CHECK(current_cib != NULL, return -ENOMSG); CRM_CHECK(result_cib != NULL, return -ENOMSG); CRM_CHECK(config_changed != NULL, return -ENOMSG); @@ -173,25 +371,26 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer } *result_cib = NULL; - *config_changed = FALSE; + *config_changed = false; if (fn == NULL) { return -EINVAL; } if (is_query) { - xmlNode *cib_ro = current_cib; + xmlNode *cib_ro = *current_cib; xmlNode *cib_filtered = NULL; - if(cib_acl_enabled(cib_ro, user)) { - if(xml_acl_filtered_copy(user, current_cib, current_cib, &cib_filtered)) { - if (cib_filtered == NULL) { - crm_debug("Pre-filtered the entire cib"); - return -EACCES; - } - cib_ro = cib_filtered; - crm_log_xml_trace(cib_ro, "filtered"); + if (cib_acl_enabled(cib_ro, user) + && xml_acl_filtered_copy(user, *current_cib, *current_cib, + &cib_filtered)) { + + if (cib_filtered == NULL) { + crm_debug("Pre-filtered the entire cib"); + return -EACCES; } + cib_ro = cib_filtered; + crm_log_xml_trace(cib_ro, "filtered"); } rc = (*fn) (op, call_options, section, req, input, cib_ro, result_cib, output); @@ -202,14 +401,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer } else if(cib_filtered == *output) { cib_filtered = NULL; /* Let them have this copy */ - } else if(*output == current_cib) { + } else if (*output == *current_cib) { /* They already know not to free it */ } else if(cib_filtered && (*output)->doc == cib_filtered->doc) { /* We're about to free the document of which *output is a part */ *output = copy_xml(*output); - } else if((*output)->doc == current_cib->doc) { + } else if ((*output)->doc == (*current_cib)->doc) { /* Give them a copy they can free */ *output = copy_xml(*output); } @@ -218,31 +417,41 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer return rc; } + make_copy = should_copy_cib(op, section, call_options); - if (pcmk_is_set(call_options, cib_zero_copy)) { + if (!make_copy) { /* Conditional on v2 patch style */ - scratch = current_cib; + scratch = *current_cib; - /* Create a shallow copy of current_cib for the version details */ - current_cib = create_xml_node(NULL, (const char *)scratch->name); - copy_in_properties(current_cib, scratch); - top = current_cib; + // Make a copy of the 
top-level element to store version details + top = create_xml_node(NULL, (const char *) scratch->name); + copy_in_properties(top, scratch); + patchset_cib = top; xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); rc = (*fn) (op, call_options, section, req, input, scratch, &scratch, output); + /* If scratch points to a new object now (for example, after an erase + * operation), then *current_cib should point to the same object. + */ + *current_cib = scratch; + } else { - scratch = copy_xml(current_cib); + scratch = copy_xml(*current_cib); + patchset_cib = *current_cib; + xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); - rc = (*fn) (op, call_options, section, req, input, current_cib, &scratch, output); + rc = (*fn) (op, call_options, section, req, input, *current_cib, + &scratch, output); - if(scratch && xml_tracking_changes(scratch) == FALSE) { + if ((scratch != NULL) && !xml_tracking_changes(scratch)) { crm_trace("Inferring changes after %s op", op); - xml_track_changes(scratch, user, current_cib, cib_acl_enabled(current_cib, user)); - xml_calculate_changes(current_cib, scratch); + xml_track_changes(scratch, user, *current_cib, + cib_acl_enabled(*current_cib, user)); + xml_calculate_changes(*current_cib, scratch); } - CRM_CHECK(current_cib != scratch, return -EINVAL); + CRM_CHECK(*current_cib != scratch, return -EINVAL); } xml_acl_disable(scratch); /* Allow the system to make any additional changes */ @@ -271,12 +480,12 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer } } - if (current_cib) { + if (patchset_cib != NULL) { int old = 0; int new = 0; crm_element_value_int(scratch, XML_ATTR_GENERATION_ADMIN, &new); - crm_element_value_int(current_cib, XML_ATTR_GENERATION_ADMIN, &old); + crm_element_value_int(patchset_cib, XML_ATTR_GENERATION_ADMIN, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: %#x)", @@ -287,7 +496,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer } else if (old == new) { crm_element_value_int(scratch, XML_ATTR_GENERATION, &new); - crm_element_value_int(current_cib, XML_ATTR_GENERATION, &old); + crm_element_value_int(patchset_cib, XML_ATTR_GENERATION, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: %#x)", XML_ATTR_GENERATION, old, new, call_options); @@ -302,13 +511,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer pcmk__strip_xml_text(scratch); fix_plus_plus_recursive(scratch); - if (pcmk_is_set(call_options, cib_zero_copy)) { - /* At this point, current_cib is just the 'cib' tag and its properties, + if (!make_copy) { + /* At this point, patchset_cib is just the "cib" tag and its properties. 
* * The v1 format would barf on this, but we know the v2 patch * format only needs it for the top-level version fields */ - local_diff = xml_create_patchset(2, current_cib, scratch, (bool*)config_changed, manage_counters); + local_diff = xml_create_patchset(2, patchset_cib, scratch, + config_changed, manage_counters); } else { static time_t expires = 0; @@ -316,63 +526,38 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer if (expires < tm_now) { expires = tm_now + 60; /* Validate clients are correctly applying v2-style diffs at most once a minute */ - with_digest = TRUE; + with_digest = true; } - local_diff = xml_create_patchset(0, current_cib, scratch, (bool*)config_changed, manage_counters); + local_diff = xml_create_patchset(0, patchset_cib, scratch, + config_changed, manage_counters); } - // Create a log output object only if we're going to use it - pcmk__if_tracing( - { - rc = pcmk_rc2legacy(pcmk__log_output_new(&out)); - CRM_CHECK(rc == pcmk_ok, goto done); - - pcmk__output_set_log_level(out, LOG_TRACE); - out_rc = pcmk__xml_show_changes(out, scratch); - }, - {} - ); + pcmk__log_xml_changes(LOG_TRACE, scratch); xml_accept_changes(scratch); if(local_diff) { - int temp_rc = pcmk_rc_no_output; - - patchset_process_digest(local_diff, current_cib, scratch, with_digest); - - if (out == NULL) { - rc = pcmk_rc2legacy(pcmk__log_output_new(&out)); - CRM_CHECK(rc == pcmk_ok, goto done); - } - pcmk__output_set_log_level(out, LOG_INFO); - temp_rc = out->message(out, "xml-patchset", local_diff); - out_rc = pcmk__output_select_rc(rc, temp_rc); - + patchset_process_digest(local_diff, patchset_cib, scratch, with_digest); + pcmk__log_xml_patchset(LOG_INFO, local_diff); crm_log_xml_trace(local_diff, "raw patch"); } - if (out != NULL) { - out->finish(out, pcmk_rc2exitc(out_rc), true, NULL); - pcmk__output_free(out); - out = NULL; - } - - if (!pcmk_is_set(call_options, cib_zero_copy) && (local_diff != NULL)) { + if (make_copy && (local_diff != NULL)) { // Original to compare against doesn't exist pcmk__if_tracing( { // Validate the calculated patch set int test_rc = pcmk_ok; int format = 1; - xmlNode *cib_copy = copy_xml(current_cib); + xmlNode *cib_copy = copy_xml(patchset_cib); - crm_element_value_int(local_diff, "format", &format); + crm_element_value_int(local_diff, PCMK_XA_FORMAT, &format); test_rc = xml_apply_patchset(cib_copy, local_diff, manage_counters); if (test_rc != pcmk_ok) { save_xml_to_file(cib_copy, "PatchApply:calculated", NULL); - save_xml_to_file(current_cib, "PatchApply:input", NULL); + save_xml_to_file(patchset_cib, "PatchApply:input", NULL); save_xml_to_file(scratch, "PatchApply:actual", NULL); save_xml_to_file(local_diff, "PatchApply:diff", NULL); crm_err("v%d patchset error, patch failed to apply: %s " @@ -391,7 +576,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer * a) we don't really care whats in the status section * b) we don't validate any of its contents at the moment anyway */ - check_schema = FALSE; + check_schema = false; } /* === scratch must not be modified after this point === @@ -420,19 +605,35 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer /* Does the CIB support the "update-*" attributes... 
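
The change-tracking and patchset primitives used here are also callable on their own. A rough round-trip sketch, assuming the usual libcrmcommon headers, with no ACL user and version counters unmanaged:

    #include <stdbool.h>
    #include <crm/crm.h>
    #include <crm/common/xml.h>

    // Build a v2 patchset from a tracked copy, then re-apply it to the original
    static int
    patchset_round_trip(xmlNode *original)
    {
        bool config_changed = false;
        int rc = pcmk_ok;
        xmlNode *scratch = copy_xml(original);
        xmlNode *patchset = NULL;

        // Track changes so xml_create_patchset() can build a v2 diff
        xml_track_changes(scratch, NULL, NULL, false);

        // Arbitrary attribute change, purely for illustration
        crm_xml_add(scratch, "description", "example change");

        patchset = xml_create_patchset(2, original, scratch, &config_changed,
                                       false);
        if (patchset != NULL) {
            // Applying the patchset should bring the original up to date
            rc = xml_apply_patchset(original, patchset, false);
        }

        free_xml(patchset);
        free_xml(scratch);
        return rc;
    }
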
*/ if (current_schema >= minimum_schema) { + /* Ensure values of origin, client, and user in scratch match + * the values in req + */ const char *origin = crm_element_value(req, F_ORIG); + const char *client = crm_element_value(req, F_CIB_CLIENTNAME); + + if (origin != NULL) { + crm_xml_add(scratch, XML_ATTR_UPDATE_ORIG, origin); + } else { + xml_remove_prop(scratch, XML_ATTR_UPDATE_ORIG); + } - CRM_LOG_ASSERT(origin != NULL); - crm_xml_replace(scratch, XML_ATTR_UPDATE_ORIG, origin); - crm_xml_replace(scratch, XML_ATTR_UPDATE_CLIENT, - crm_element_value(req, F_CIB_CLIENTNAME)); - crm_xml_replace(scratch, XML_ATTR_UPDATE_USER, crm_element_value(req, F_CIB_USER)); + if (client != NULL) { + crm_xml_add(scratch, XML_ATTR_UPDATE_CLIENT, user); + } else { + xml_remove_prop(scratch, XML_ATTR_UPDATE_CLIENT); + } + + if (user != NULL) { + crm_xml_add(scratch, XML_ATTR_UPDATE_USER, user); + } else { + xml_remove_prop(scratch, XML_ATTR_UPDATE_USER); + } } } } crm_trace("Perform validation: %s", pcmk__btoa(check_schema)); - if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, TRUE)) { + if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, true)) { const char *current_schema = crm_element_value(scratch, XML_ATTR_VALIDATION); @@ -444,13 +645,17 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer done: *result_cib = scratch; - if(rc != pcmk_ok && cib_acl_enabled(current_cib, user)) { - if(xml_acl_filtered_copy(user, current_cib, scratch, result_cib)) { - if (*result_cib == NULL) { - crm_debug("Pre-filtered the entire cib result"); - } - free_xml(scratch); + + /* @TODO: This may not work correctly with !make_copy, since we don't + * keep the original CIB. + */ + if ((rc != pcmk_ok) && cib_acl_enabled(patchset_cib, user) + && xml_acl_filtered_copy(user, patchset_cib, scratch, result_cib)) { + + if (*result_cib == NULL) { + crm_debug("Pre-filtered the entire cib result"); } + free_xml(scratch); } if(diff) { @@ -464,36 +669,117 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer return rc; } -xmlNode * -cib_create_op(int call_id, const char *op, const char *host, - const char *section, xmlNode *data, int call_options, - const char *user_name) +int +cib__create_op(cib_t *cib, const char *op, const char *host, + const char *section, xmlNode *data, int call_options, + const char *user_name, const char *client_name, + xmlNode **op_msg) { - xmlNode *op_msg = create_xml_node(NULL, "cib_command"); + CRM_CHECK((cib != NULL) && (op_msg != NULL), return -EPROTO); - CRM_CHECK(op_msg != NULL, return NULL); - - crm_xml_add(op_msg, F_XML_TAGNAME, "cib_command"); + *op_msg = create_xml_node(NULL, T_CIB_COMMAND); + if (*op_msg == NULL) { + return -EPROTO; + } - crm_xml_add(op_msg, F_TYPE, T_CIB); - crm_xml_add(op_msg, F_CIB_OPERATION, op); - crm_xml_add(op_msg, F_CIB_HOST, host); - crm_xml_add(op_msg, F_CIB_SECTION, section); - crm_xml_add_int(op_msg, F_CIB_CALLID, call_id); - if (user_name) { - crm_xml_add(op_msg, F_CIB_USER, user_name); + cib->call_id++; + if (cib->call_id < 1) { + cib->call_id = 1; } + + crm_xml_add(*op_msg, F_XML_TAGNAME, T_CIB_COMMAND); + crm_xml_add(*op_msg, F_TYPE, T_CIB); + crm_xml_add(*op_msg, F_CIB_OPERATION, op); + crm_xml_add(*op_msg, F_CIB_HOST, host); + crm_xml_add(*op_msg, F_CIB_SECTION, section); + crm_xml_add(*op_msg, F_CIB_USER, user_name); + crm_xml_add(*op_msg, F_CIB_CLIENTNAME, client_name); + crm_xml_add_int(*op_msg, F_CIB_CALLID, cib->call_id); + crm_trace("Sending call options: %.8lx, %d", 
(long)call_options, call_options); - crm_xml_add_int(op_msg, F_CIB_CALLOPTS, call_options); + crm_xml_add_int(*op_msg, F_CIB_CALLOPTS, call_options); if (data != NULL) { - add_message_xml(op_msg, F_CIB_CALLDATA, data); + add_message_xml(*op_msg, F_CIB_CALLDATA, data); } - if (call_options & cib_inhibit_bcast) { - CRM_CHECK((call_options & cib_scope_local), return NULL); + if (pcmk_is_set(call_options, cib_inhibit_bcast)) { + CRM_CHECK(pcmk_is_set(call_options, cib_scope_local), + free_xml(*op_msg); return -EPROTO); } - return op_msg; + return pcmk_ok; +} + +/*! + * \internal + * \brief Check whether a CIB request is supported in a transaction + * + * \param[in] request CIB request + * + * \return Standard Pacemaker return code + */ +static int +validate_transaction_request(const xmlNode *request) +{ + const char *op = crm_element_value(request, F_CIB_OPERATION); + const char *host = crm_element_value(request, F_CIB_HOST); + const cib__operation_t *operation = NULL; + int rc = cib__get_operation(op, &operation); + + if (rc != pcmk_rc_ok) { + // cib__get_operation() logs error + return rc; + } + + if (!pcmk_is_set(operation->flags, cib__op_attr_transaction)) { + crm_err("Operation %s is not supported in CIB transactions", op); + return EOPNOTSUPP; + } + + if (host != NULL) { + crm_err("Operation targeting a specific node (%s) is not supported in " + "a CIB transaction", + host); + return EOPNOTSUPP; + } + return pcmk_rc_ok; +} + +/*! + * \internal + * \brief Append a CIB request to a CIB transaction + * + * \param[in,out] cib CIB client whose transaction to extend + * \param[in,out] request Request to add to transaction + * + * \return Legacy Pacemaker return code + */ +int +cib__extend_transaction(cib_t *cib, xmlNode *request) +{ + int rc = pcmk_rc_ok; + + CRM_ASSERT((cib != NULL) && (request != NULL)); + + rc = validate_transaction_request(request); + + if ((rc == pcmk_rc_ok) && (cib->transaction == NULL)) { + rc = pcmk_rc_no_transaction; + } + + if (rc == pcmk_rc_ok) { + add_node_copy(cib->transaction, request); + + } else { + const char *op = crm_element_value(request, F_CIB_OPERATION); + const char *client_id = NULL; + + cib->cmds->client_id(cib, NULL, &client_id); + crm_err("Failed to add '%s' operation to transaction for client %s: %s", + op, pcmk__s(client_id, "(unidentified)"), pcmk_rc_str(rc)); + crm_log_xml_info(request, "failed"); + } + return pcmk_rc2legacy(rc); } void @@ -701,16 +987,7 @@ cib_apply_patch_event(xmlNode *event, xmlNode *input, xmlNode **output, } if (level > LOG_CRIT) { - pcmk__output_t *out = NULL; - - rc = pcmk_rc2legacy(pcmk__log_output_new(&out)); - CRM_CHECK(rc == pcmk_ok, return rc); - - pcmk__output_set_log_level(out, level); - rc = out->message(out, "xml-patchset", diff); - out->finish(out, pcmk_rc2exitc(rc), true, NULL); - pcmk__output_free(out); - rc = pcmk_ok; + pcmk__log_xml_patchset(level, diff); } if (input != NULL) { diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am index 9225f29..2ddbffb 100644 --- a/lib/cluster/Makefile.am +++ b/lib/cluster/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2018 the Pacemaker project contributors +# Copyright 2004-2023 the Pacemaker project contributors # # The version control history for this file may have further details. 
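
The cib__create_op()/cib__extend_transaction() pair defined above is what each CIB variant's perform_op implementation now calls: build the request, and if cib_transaction is set, queue it instead of sending it. Condensed into one sketch (error paths trimmed, internal header locations assumed):

    #include <crm/cib.h>
    #include <crm/cib/internal.h>   // assumed home of cib__create_op() et al.

    static int
    variant_perform_op(cib_t *cib, const char *op, const char *host,
                       const char *section, xmlNode *data, int call_options,
                       const char *user_name)
    {
        xmlNode *op_msg = NULL;
        int rc = cib__create_op(cib, op, host, section, data, call_options,
                                user_name, NULL, &op_msg);

        if (rc != pcmk_ok) {
            return rc;
        }

        if (pcmk_is_set(call_options, cib_transaction)) {
            // Queue the request; it is sent when the transaction is committed
            rc = cib__extend_transaction(cib, op_msg);
            free_xml(op_msg);
            return rc;
        }

        // ... otherwise send op_msg to the CIB manager as before ...
        free_xml(op_msg);
        return rc;
    }
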
# @@ -13,17 +13,20 @@ noinst_HEADERS = crmcluster_private.h ## libraries lib_LTLIBRARIES = libcrmcluster.la -libcrmcluster_la_LDFLAGS = -version-info 30:0:1 +libcrmcluster_la_LDFLAGS = -version-info 31:0:2 libcrmcluster_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libcrmcluster_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) -libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS) +libcrmcluster_la_LIBADD = $(top_builddir)/lib/fencing/libstonithd.la +libcrmcluster_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la +libcrmcluster_la_LIBADD += $(CLUSTERLIBS) -libcrmcluster_la_SOURCES = election.c cluster.c membership.c +## Library sources (*must* use += format for bumplibs) +libcrmcluster_la_SOURCES = cluster.c +libcrmcluster_la_SOURCES += election.c +libcrmcluster_la_SOURCES += membership.c if BUILD_CS_SUPPORT -libcrmcluster_la_SOURCES += cpg.c corosync.c +libcrmcluster_la_SOURCES += corosync.c +libcrmcluster_la_SOURCES += cpg.c endif - -clean-generic: - rm -f *.log *.debug *.xml *~ diff --git a/lib/cluster/cluster.c b/lib/cluster/cluster.c index 011e053..f2cd428 100644 --- a/lib/cluster/cluster.c +++ b/lib/cluster/cluster.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -160,7 +160,7 @@ pcmk_cluster_free(crm_cluster_t *cluster) */ gboolean send_cluster_message(const crm_node_t *node, enum crm_ais_msg_types service, - xmlNode *data, gboolean ordered) + const xmlNode *data, gboolean ordered) { switch (get_cluster_type()) { case pcmk_cluster_corosync: @@ -280,7 +280,7 @@ crm_peer_uname(const char *uuid) return NULL; } - node = pcmk__search_cluster_node_cache((uint32_t) id, NULL); + node = pcmk__search_cluster_node_cache((uint32_t) id, NULL, NULL); if (node != NULL) { crm_info("Setting uuid for node %s[%u] to %s", node->uname, node->id, uuid); @@ -293,19 +293,6 @@ crm_peer_uname(const char *uuid) return NULL; } -/*! - * \brief Add a node's UUID as an XML attribute - * - * \param[in,out] xml XML element to add UUID to - * \param[in] attr XML attribute name to set - * \param[in,out] node Node whose UUID should be used as attribute value - */ -void -set_uuid(xmlNode *xml, const char *attr, crm_node_t *node) -{ - crm_xml_add(xml, attr, crm_peer_uuid(node)); -} - /*! * \brief Get a log-friendly string equivalent of a cluster type * @@ -403,3 +390,17 @@ is_corosync_cluster(void) { return get_cluster_type() == pcmk_cluster_corosync; } + +// Deprecated functions kept only for backward API compatibility +// LCOV_EXCL_START + +#include + +void +set_uuid(xmlNode *xml, const char *attr, crm_node_t *node) +{ + crm_xml_add(xml, attr, crm_peer_uuid(node)); +} + +// LCOV_EXCL_STOP +// End deprecated API diff --git a/lib/cluster/cpg.c b/lib/cluster/cpg.c index 2af4a50..d1decc6 100644 --- a/lib/cluster/cpg.c +++ b/lib/cluster/cpg.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
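
With set_uuid() relegated to the deprecated section above, callers can attach a node's UUID directly; a minimal equivalent of the old wrapper (attribute name left to the caller):

    #include <crm/cluster.h>

    // Direct replacement for the deprecated set_uuid(xml, attr, node)
    static void
    add_peer_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
    {
        crm_xml_add(xml, attr, crm_peer_uuid(node));
    }
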
* @@ -506,14 +506,14 @@ pcmk_message_common_cs(cpg_handle_t handle, uint32_t nodeid, uint32_t pid, void uncompressed = calloc(1, new_size); rc = BZ2_bzBuffToBuffDecompress(uncompressed, &new_size, msg->data, msg->compressed_size, 1, 0); - if (rc != BZ_OK) { - crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", - bz2_strerror(rc), rc); + rc = pcmk__bzlib2rc(rc); + + if (rc != pcmk_rc_ok) { + crm_err("Decompression failed: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); free(uncompressed); goto badmsg; } - CRM_ASSERT(rc == BZ_OK); CRM_ASSERT(new_size == msg->size); data = uncompressed; @@ -628,7 +628,7 @@ node_left(const char *cpg_group_name, int event_counter, size_t member_list_entries) { crm_node_t *peer = pcmk__search_cluster_node_cache(cpg_peer->nodeid, - NULL); + NULL, NULL); const struct cpg_address **rival = NULL; /* Most CPG-related Pacemaker code assumes that only one process on a node @@ -888,11 +888,11 @@ cluster_connect_cpg(crm_cluster_t *cluster) * * \return TRUE on success, otherwise FALSE */ -gboolean -pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node, +bool +pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node, enum crm_ais_msg_types dest) { - gboolean rc = TRUE; + bool rc = true; char *data = NULL; data = dump_xml_unformatted(msg); diff --git a/lib/cluster/crmcluster_private.h b/lib/cluster/crmcluster_private.h index 6933b73..370bca5 100644 --- a/lib/cluster/crmcluster_private.h +++ b/lib/cluster/crmcluster_private.h @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the Pacemaker project contributors + * Copyright 2020-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -41,7 +41,7 @@ G_GNUC_INTERNAL void pcmk__corosync_disconnect(crm_cluster_t *cluster); G_GNUC_INTERNAL -gboolean pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node, - enum crm_ais_msg_types dest); +bool pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node, + enum crm_ais_msg_types dest); #endif // PCMK__CRMCLUSTER_PRIVATE__H diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c index 0c54f19..f856cca 100644 --- a/lib/cluster/membership.c +++ b/lib/cluster/membership.c @@ -157,7 +157,7 @@ crm_remote_peer_cache_remove(const char *node_name) * * \param[in] node_state XML of node state * - * \return CRM_NODE_LOST if XML_NODE_IN_CLUSTER is false in node_state, + * \return CRM_NODE_LOST if PCMK__XA_IN_CCM is false in node_state, * CRM_NODE_MEMBER otherwise * \note Unlike most boolean XML attributes, this one defaults to true, for * backward compatibility with older controllers that don't set it. 
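
The decompression hunk above shows the new convention: convert bzlib status codes with pcmk__bzlib2rc() and report them through pcmk_rc_str(). A standalone sketch of that pattern (pcmk__bzlib2rc() is internal, so the header location is an assumption):

    #include <stdlib.h>
    #include <bzlib.h>
    #include <crm/crm.h>
    #include <crm/common/internal.h>   // assumed declaration of pcmk__bzlib2rc()

    // Decompress a bzip2 payload of known uncompressed size, or return NULL
    static char *
    decompress_payload(char *compressed, unsigned int compressed_size,
                       unsigned int expected_size)
    {
        unsigned int size = expected_size;
        char *uncompressed = calloc(1, (size_t) size + 1);
        int rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size, compressed,
                                            compressed_size, 1, 0);

        rc = pcmk__bzlib2rc(rc);   // map BZ_* codes to standard return codes
        if (rc != pcmk_rc_ok) {
            crm_err("Decompression failed: %s " CRM_XS " rc=%d",
                    pcmk_rc_str(rc), rc);
            free(uncompressed);
            return NULL;
        }
        return uncompressed;
    }
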
@@ -167,7 +167,8 @@ remote_state_from_cib(const xmlNode *node_state) { bool status = false; - if (pcmk__xe_get_bool_attr(node_state, XML_NODE_IN_CLUSTER, &status) == pcmk_rc_ok && !status) { + if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM, + &status) == pcmk_rc_ok) && !status) { return CRM_NODE_LOST; } else { return CRM_NODE_MEMBER; @@ -515,7 +516,7 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags) } if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) { - node = pcmk__search_cluster_node_cache(id, uname); + node = pcmk__search_cluster_node_cache(id, uname, NULL); } return node; } @@ -525,12 +526,15 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags) * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for + * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster + * node ID to search for * \param[in] flags Bitmask of enum crm_get_peer_flags * * \return (Possibly newly created) node cache entry */ crm_node_t * -crm_get_peer_full(unsigned int id, const char *uname, int flags) +pcmk__get_peer_full(unsigned int id, const char *uname, const char *uuid, + int flags) { crm_node_t *node = NULL; @@ -543,22 +547,40 @@ crm_get_peer_full(unsigned int id, const char *uname, int flags) } if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) { - node = crm_get_peer(id, uname); + node = pcmk__get_peer(id, uname, uuid); } return node; } +/*! + * \brief Get a node cache entry (cluster or Pacemaker Remote) + * + * \param[in] id If not 0, cluster node ID to search for + * \param[in] uname If not NULL, node name to search for + * \param[in] flags Bitmask of enum crm_get_peer_flags + * + * \return (Possibly newly created) node cache entry + */ +crm_node_t * +crm_get_peer_full(unsigned int id, const char *uname, int flags) +{ + return pcmk__get_peer_full(id, uname, NULL, flags); +} + /*! 
* \internal * \brief Search cluster node cache * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for + * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster + * node ID to search for * * \return Cluster node cache entry if found, otherwise NULL */ crm_node_t * -pcmk__search_cluster_node_cache(unsigned int id, const char *uname) +pcmk__search_cluster_node_cache(unsigned int id, const char *uname, + const char *uuid) { GHashTableIter iter; crm_node_t *node = NULL; @@ -589,6 +611,16 @@ pcmk__search_cluster_node_cache(unsigned int id, const char *uname) break; } } + + } else if (uuid != NULL) { + g_hash_table_iter_init(&iter, crm_peer_cache); + while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { + if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) { + crm_trace("UUID match: %s = %p", node->uuid, node); + by_id = node; + break; + } + } } node = by_id; /* Good default */ @@ -693,12 +725,14 @@ remove_conflicting_peer(crm_node_t *node) * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for + * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster + * node ID to search for * * \return (Possibly newly created) cluster node cache entry */ /* coverity[-alloc] Memory is referenced in one or both hashtables */ crm_node_t * -crm_get_peer(unsigned int id, const char *uname) +pcmk__get_peer(unsigned int id, const char *uname, const char *uuid) { crm_node_t *node = NULL; char *uname_lookup = NULL; @@ -707,7 +741,7 @@ crm_get_peer(unsigned int id, const char *uname) crm_peer_init(); - node = pcmk__search_cluster_node_cache(id, uname); + node = pcmk__search_cluster_node_cache(id, uname, uuid); /* if uname wasn't provided, and find_peer did not turn up a uname based on id. * we need to do a lookup of the node name using the id in the cluster membership. */ @@ -721,7 +755,7 @@ crm_get_peer(unsigned int id, const char *uname) /* try to turn up the node one more time now that we know the uname. */ if (node == NULL) { - node = pcmk__search_cluster_node_cache(id, uname); + node = pcmk__search_cluster_node_cache(id, uname, uuid); } } @@ -750,7 +784,9 @@ crm_get_peer(unsigned int id, const char *uname) } if(node->uuid == NULL) { - const char *uuid = crm_peer_uuid(node); + if (uuid == NULL) { + uuid = crm_peer_uuid(node); + } if (uuid) { crm_info("Node %u has uuid %s", id, uuid); @@ -765,6 +801,21 @@ crm_get_peer(unsigned int id, const char *uname) return node; } +/*! + * \brief Get a cluster node cache entry + * + * \param[in] id If not 0, cluster node ID to search for + * \param[in] uname If not NULL, node name to search for + * + * \return (Possibly newly created) cluster node cache entry + */ +/* coverity[-alloc] Memory is referenced in one or both hashtables */ +crm_node_t * +crm_get_peer(unsigned int id, const char *uname) +{ + return pcmk__get_peer(id, uname, NULL); +} + /*! 
* \internal * \brief Update a node's uname @@ -917,6 +968,13 @@ crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const proc2text(flag), status); } + if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { + node->when_online = time(NULL); + + } else { + node->when_online = 0; + } + /* Call the client callback first, then update the peer state, * in case the node will be reaped */ @@ -1025,6 +1083,13 @@ update_peer_state_iter(const char *source, crm_node_t *node, const char *state, if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) { char *last = node->state; + if (is_member) { + node->when_member = time(NULL); + + } else { + node->when_member = 0; + } + node->state = strdup(state); crm_notice("Node %s state is now %s " CRM_XS " nodeid=%u previous=%s source=%s", node->uname, state, diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am index ef729d4..f9c43b9 100644 --- a/lib/common/Makefile.am +++ b/lib/common/Makefile.am @@ -8,7 +8,8 @@ # include $(top_srcdir)/mk/common.mk -AM_CPPFLAGS += -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu +AM_CPPFLAGS += -I$(top_builddir)/lib/gnu \ + -I$(top_srcdir)/lib/gnu ## libraries lib_LTLIBRARIES = libcrmcommon.la @@ -29,14 +30,16 @@ CFLAGS = $(CFLAGS_COPY:-Wcast-qual=) -fPIC # changes the order so the subdirectories are processed afterwards. SUBDIRS = . tests -noinst_HEADERS = crmcommon_private.h mock_private.h +noinst_HEADERS = crmcommon_private.h \ + mock_private.h -libcrmcommon_la_LDFLAGS = -version-info 45:0:11 +libcrmcommon_la_LDFLAGS = -version-info 46:0:12 libcrmcommon_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libcrmcommon_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) -libcrmcommon_la_LIBADD = @LIBADD_DL@ $(top_builddir)/lib/gnu/libgnu.la +libcrmcommon_la_LIBADD = @LIBADD_DL@ \ + $(top_builddir)/lib/gnu/libgnu.la # If configured with --with-profiling or --with-coverage, BUILD_PROFILING will # be set and -fno-builtin will be added to the CFLAGS. 
However, libcrmcommon @@ -47,9 +50,10 @@ if BUILD_PROFILING libcrmcommon_la_LIBADD += -lm endif -# Use += rather than backlashed continuation lines for parsing by bumplibs +## Library sources (*must* use += format for bumplibs) libcrmcommon_la_SOURCES = libcrmcommon_la_SOURCES += acl.c +libcrmcommon_la_SOURCES += actions.c libcrmcommon_la_SOURCES += agents.c libcrmcommon_la_SOURCES += alerts.c libcrmcommon_la_SOURCES += attrs.c @@ -75,7 +79,6 @@ libcrmcommon_la_SOURCES += mainloop.c libcrmcommon_la_SOURCES += messages.c libcrmcommon_la_SOURCES += nodes.c libcrmcommon_la_SOURCES += nvpair.c -libcrmcommon_la_SOURCES += operations.c libcrmcommon_la_SOURCES += options.c libcrmcommon_la_SOURCES += output.c libcrmcommon_la_SOURCES += output_html.c @@ -89,12 +92,14 @@ libcrmcommon_la_SOURCES += pid.c libcrmcommon_la_SOURCES += procfs.c libcrmcommon_la_SOURCES += remote.c libcrmcommon_la_SOURCES += results.c +libcrmcommon_la_SOURCES += scheduler.c libcrmcommon_la_SOURCES += schemas.c libcrmcommon_la_SOURCES += scores.c libcrmcommon_la_SOURCES += strings.c libcrmcommon_la_SOURCES += utils.c libcrmcommon_la_SOURCES += watchdog.c libcrmcommon_la_SOURCES += xml.c +libcrmcommon_la_SOURCES += xml_attr.c libcrmcommon_la_SOURCES += xml_display.c libcrmcommon_la_SOURCES += xpath.c @@ -107,18 +112,22 @@ include $(top_srcdir)/mk/tap.mk libcrmcommon_test_la_SOURCES = $(libcrmcommon_la_SOURCES) libcrmcommon_test_la_SOURCES += mock.c -libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP) +libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) \ + -rpath $(libdir) \ + $(LDFLAGS_WRAP) # If GCC emits a builtin function in place of something we've mocked up, that will # get used instead of the mocked version which leads to unexpected test results. So # disable all builtins. Older versions of GCC (at least, on RHEL7) will still emit # replacement code for strdup (and possibly other functions) unless -fno-inline is # also added. -libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) -DPCMK__UNIT_TESTING -fno-builtin -fno-inline +libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) \ + -DPCMK__UNIT_TESTING \ + -fno-builtin \ + -fno-inline # If -fno-builtin is used, -lm also needs to be added. See the comment at # BUILD_PROFILING above. -libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) -lcmocka -lm +libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) \ + -lcmocka \ + -lm nodist_libcrmcommon_test_la_SOURCES = $(nodist_libcrmcommon_la_SOURCES) - -clean-generic: - rm -f *.log *.debug *.xml *~ diff --git a/lib/common/acl.c b/lib/common/acl.c index 33a4e00..1ebd765 100644 --- a/lib/common/acl.c +++ b/lib/common/acl.c @@ -26,7 +26,7 @@ typedef struct xml_acl_s { enum xml_private_flags mode; - char *xpath; + gchar *xpath; } xml_acl_t; static void @@ -35,7 +35,7 @@ free_acl(void *data) if (data) { xml_acl_t *acl = data; - free(acl->xpath); + g_free(acl->xpath); free(acl); } } @@ -68,7 +68,7 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode) if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) { // Schema should prevent this, but to be safe ... 
crm_trace("Ignoring ACL <%s> element without selection criteria", - crm_element_name(xml)); + xml->name); return NULL; } @@ -77,10 +77,9 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode) acl->mode = mode; if (xpath) { - acl->xpath = strdup(xpath); - CRM_ASSERT(acl->xpath != NULL); + acl->xpath = g_strdup(xpath); crm_trace("Unpacked ACL <%s> element using xpath: %s", - crm_element_name(xml), acl->xpath); + xml->name, acl->xpath); } else { GString *buf = g_string_sized_new(128); @@ -101,12 +100,11 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode) pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), NULL); } - acl->xpath = strdup((const char *) buf->str); - CRM_ASSERT(acl->xpath != NULL); + acl->xpath = buf->str; - g_string_free(buf, TRUE); + g_string_free(buf, FALSE); crm_trace("Unpacked ACL <%s> element as xpath: %s", - crm_element_name(xml), acl->xpath); + xml->name, acl->xpath); } return g_list_append(acls, acl); @@ -131,10 +129,10 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls) for (child = pcmk__xe_first_child(acl_entry); child; child = pcmk__xe_next(child)) { - const char *tag = crm_element_name(child); + const char *tag = (const char *) child->name; const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND); - if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){ + if (pcmk__xe_is(child, XML_ACL_TAG_PERMISSION)) { CRM_ASSERT(kind != NULL); crm_trace("Unpacking ACL <%s> element of kind '%s'", tag, kind); tag = kind; @@ -157,7 +155,7 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls) if (role_id && strcmp(ref_role, role_id) == 0) { crm_trace("Unpacking referenced role '%s' in ACL <%s> element", - role_id, crm_element_name(acl_entry)); + role_id, acl_entry->name); acls = parse_acl_entry(acl_top, role, acls); break; } @@ -304,10 +302,9 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user) for (child = pcmk__xe_first_child(acls); child; child = pcmk__xe_next(child)) { - const char *tag = crm_element_name(child); - if (!strcmp(tag, XML_ACL_TAG_USER) - || !strcmp(tag, XML_ACL_TAG_USERv1)) { + if (pcmk__xe_is(child, XML_ACL_TAG_USER) + || pcmk__xe_is(child, XML_ACL_TAG_USERv1)) { const char *id = crm_element_value(child, XML_ATTR_NAME); if (id == NULL) { @@ -318,7 +315,7 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user) crm_debug("Unpacking ACLs for user '%s'", id); docpriv->acls = parse_acl_entry(acls, child, docpriv->acls); } - } else if (!strcmp(tag, XML_ACL_TAG_GROUP)) { + } else if (pcmk__xe_is(child, XML_ACL_TAG_GROUP)) { const char *id = crm_element_value(child, XML_ATTR_NAME); if (id == NULL) { @@ -392,7 +389,7 @@ purge_xml_attributes(xmlNode *xml) if (test_acl_mode(nodepriv->flags, pcmk__xf_acl_read)) { crm_trace("%s[@" XML_ATTR_ID "=%s] is readable", - crm_element_name(xml), ID(xml)); + xml->name, ID(xml)); return true; } @@ -571,22 +568,22 @@ pcmk__apply_creation_acl(xmlNode *xml, bool check_top) if (implicitly_allowed(xml)) { crm_trace("Creation of <%s> scaffolding with id=\"%s\"" " is implicitly allowed", - crm_element_name(xml), display_id(xml)); + xml->name, display_id(xml)); } else if (pcmk__check_acl(xml, NULL, pcmk__xf_acl_write)) { crm_trace("ACLs allow creation of <%s> with id=\"%s\"", - crm_element_name(xml), display_id(xml)); + xml->name, display_id(xml)); } else if (check_top) { crm_trace("ACLs disallow creation of <%s> with id=\"%s\"", - crm_element_name(xml), display_id(xml)); + xml->name, display_id(xml)); 
pcmk_free_xml_subtree(xml); return; } else { crm_notice("ACLs would disallow creation of %s<%s> with id=\"%s\"", ((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""), - crm_element_name(xml), display_id(xml)); + xml->name, display_id(xml)); } } diff --git a/lib/common/actions.c b/lib/common/actions.c new file mode 100644 index 0000000..e710615 --- /dev/null +++ b/lib/common/actions.c @@ -0,0 +1,532 @@ +/* + * Copyright 2004-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#include + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/*! + * \brief Generate an operation key (RESOURCE_ACTION_INTERVAL) + * + * \param[in] rsc_id ID of resource being operated on + * \param[in] op_type Operation name + * \param[in] interval_ms Operation interval + * + * \return Newly allocated memory containing operation key as string + * + * \note This function asserts on errors, so it will never return NULL. + * The caller is responsible for freeing the result with free(). + */ +char * +pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms) +{ + CRM_ASSERT(rsc_id != NULL); + CRM_ASSERT(op_type != NULL); + return crm_strdup_printf(PCMK__OP_FMT, rsc_id, op_type, interval_ms); +} + +static inline gboolean +convert_interval(const char *s, guint *interval_ms) +{ + unsigned long l; + + errno = 0; + l = strtoul(s, NULL, 10); + + if (errno != 0) { + return FALSE; + } + + *interval_ms = (guint) l; + return TRUE; +} + +/*! + * \internal + * \brief Check for underbar-separated substring match + * + * \param[in] key Overall string being checked + * \param[in] position Match before underbar at this \p key index + * \param[in] matches Substrings to match (may contain underbars) + * + * \return \p key index of underbar before any matching substring, + * or 0 if none + */ +static size_t +match_before(const char *key, size_t position, const char **matches) +{ + for (int i = 0; matches[i] != NULL; ++i) { + const size_t match_len = strlen(matches[i]); + + // Must have at least X_MATCH before position + if (position > (match_len + 1)) { + const size_t possible = position - match_len - 1; + + if ((key[possible] == '_') + && (strncmp(key + possible + 1, matches[i], match_len) == 0)) { + return possible; + } + } + } + return 0; +} + +gboolean +parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms) +{ + guint local_interval_ms = 0; + const size_t key_len = (key == NULL)? 0 : strlen(key); + + // Operation keys must be formatted as RSC_ACTION_INTERVAL + size_t action_underbar = 0; // Index in key of underbar before ACTION + size_t interval_underbar = 0; // Index in key of underbar before INTERVAL + size_t possible = 0; + + /* Underbar was a poor choice of separator since both RSC and ACTION can + * contain underbars. Here, list action names and name prefixes that can. 
+ */ + const char *actions_with_underbars[] = { + PCMK_ACTION_MIGRATE_FROM, + PCMK_ACTION_MIGRATE_TO, + NULL + }; + const char *action_prefixes_with_underbars[] = { + "pre_" PCMK_ACTION_NOTIFY, + "post_" PCMK_ACTION_NOTIFY, + "confirmed-pre_" PCMK_ACTION_NOTIFY, + "confirmed-post_" PCMK_ACTION_NOTIFY, + NULL, + }; + + // Initialize output variables in case of early return + if (rsc_id) { + *rsc_id = NULL; + } + if (op_type) { + *op_type = NULL; + } + if (interval_ms) { + *interval_ms = 0; + } + + // RSC_ACTION_INTERVAL implies a minimum of 5 characters + if (key_len < 5) { + return FALSE; + } + + // Find, parse, and validate interval + interval_underbar = key_len - 2; + while ((interval_underbar > 2) && (key[interval_underbar] != '_')) { + --interval_underbar; + } + if ((interval_underbar == 2) + || !convert_interval(key + interval_underbar + 1, &local_interval_ms)) { + return FALSE; + } + + // Find the base (OCF) action name, disregarding prefixes + action_underbar = match_before(key, interval_underbar, + actions_with_underbars); + if (action_underbar == 0) { + action_underbar = interval_underbar - 2; + while ((action_underbar > 0) && (key[action_underbar] != '_')) { + --action_underbar; + } + if (action_underbar == 0) { + return FALSE; + } + } + possible = match_before(key, action_underbar, + action_prefixes_with_underbars); + if (possible != 0) { + action_underbar = possible; + } + + // Set output variables + if (rsc_id != NULL) { + *rsc_id = strndup(key, action_underbar); + CRM_ASSERT(*rsc_id != NULL); + } + if (op_type != NULL) { + *op_type = strndup(key + action_underbar + 1, + interval_underbar - action_underbar - 1); + CRM_ASSERT(*op_type != NULL); + } + if (interval_ms != NULL) { + *interval_ms = local_interval_ms; + } + return TRUE; +} + +char * +pcmk__notify_key(const char *rsc_id, const char *notify_type, + const char *op_type) +{ + CRM_CHECK(rsc_id != NULL, return NULL); + CRM_CHECK(op_type != NULL, return NULL); + CRM_CHECK(notify_type != NULL, return NULL); + return crm_strdup_printf("%s_%s_notify_%s_0", + rsc_id, notify_type, op_type); +} + +/*! + * \brief Parse a transition magic string into its constituent parts + * + * \param[in] magic Magic string to parse (must be non-NULL) + * \param[out] uuid If non-NULL, where to store copy of parsed UUID + * \param[out] transition_id If non-NULL, where to store parsed transition ID + * \param[out] action_id If non-NULL, where to store parsed action ID + * \param[out] op_status If non-NULL, where to store parsed result status + * \param[out] op_rc If non-NULL, where to store parsed actual rc + * \param[out] target_rc If non-NULL, where to stored parsed target rc + * + * \return TRUE if key was valid, FALSE otherwise + * \note If uuid is supplied and this returns TRUE, the caller is responsible + * for freeing the memory for *uuid using free(). 
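
pcmk__op_key() and parse_op_key() above are inverses over the RSC_ACTION_INTERVAL convention. A self-contained round trip (header locations for the internal pcmk__op_key() and the new PCMK_ACTION_* constants are assumptions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <glib.h>
    #include <crm/crm.h>
    #include <crm/common/util.h>       // parse_op_key()
    #include <crm/common/internal.h>   // assumed home of pcmk__op_key()

    int
    main(void)
    {
        char *rsc = NULL;
        char *action = NULL;
        guint interval_ms = 0;

        // Produces "my-rsc_monitor_10000"
        char *key = pcmk__op_key("my-rsc", PCMK_ACTION_MONITOR, 10000);

        if (parse_op_key(key, &rsc, &action, &interval_ms)) {
            printf("%s / %s / %ums\n", rsc, action, interval_ms);
        }

        free(key);
        free(rsc);
        free(action);
        return 0;
    }
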
+ */ +gboolean +decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, + int *op_status, int *op_rc, int *target_rc) +{ + int res = 0; + char *key = NULL; + gboolean result = TRUE; + int local_op_status = -1; + int local_op_rc = -1; + + CRM_CHECK(magic != NULL, return FALSE); + +#ifdef HAVE_SSCANF_M + res = sscanf(magic, "%d:%d;%ms", &local_op_status, &local_op_rc, &key); +#else + key = calloc(1, strlen(magic) - 3); // magic must have >=4 other characters + CRM_ASSERT(key); + res = sscanf(magic, "%d:%d;%s", &local_op_status, &local_op_rc, key); +#endif + if (res == EOF) { + crm_err("Could not decode transition information '%s': %s", + magic, pcmk_rc_str(errno)); + result = FALSE; + } else if (res < 3) { + crm_warn("Transition information '%s' incomplete (%d of 3 expected items)", + magic, res); + result = FALSE; + } else { + if (op_status) { + *op_status = local_op_status; + } + if (op_rc) { + *op_rc = local_op_rc; + } + result = decode_transition_key(key, uuid, transition_id, action_id, + target_rc); + } + free(key); + return result; +} + +char * +pcmk__transition_key(int transition_id, int action_id, int target_rc, + const char *node) +{ + CRM_CHECK(node != NULL, return NULL); + return crm_strdup_printf("%d:%d:%d:%-*s", + action_id, transition_id, target_rc, 36, node); +} + +/*! + * \brief Parse a transition key into its constituent parts + * + * \param[in] key Transition key to parse (must be non-NULL) + * \param[out] uuid If non-NULL, where to store copy of parsed UUID + * \param[out] transition_id If non-NULL, where to store parsed transition ID + * \param[out] action_id If non-NULL, where to store parsed action ID + * \param[out] target_rc If non-NULL, where to stored parsed target rc + * + * \return TRUE if key was valid, FALSE otherwise + * \note If uuid is supplied and this returns TRUE, the caller is responsible + * for freeing the memory for *uuid using free(). + */ +gboolean +decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, + int *target_rc) +{ + int local_transition_id = -1; + int local_action_id = -1; + int local_target_rc = -1; + char local_uuid[37] = { '\0' }; + + // Initialize any supplied output arguments + if (uuid) { + *uuid = NULL; + } + if (transition_id) { + *transition_id = -1; + } + if (action_id) { + *action_id = -1; + } + if (target_rc) { + *target_rc = -1; + } + + CRM_CHECK(key != NULL, return FALSE); + if (sscanf(key, "%d:%d:%d:%36s", &local_action_id, &local_transition_id, + &local_target_rc, local_uuid) != 4) { + crm_err("Invalid transition key '%s'", key); + return FALSE; + } + if (strlen(local_uuid) != 36) { + crm_warn("Invalid UUID '%s' in transition key '%s'", local_uuid, key); + } + if (uuid) { + *uuid = strdup(local_uuid); + CRM_ASSERT(*uuid); + } + if (transition_id) { + *transition_id = local_transition_id; + } + if (action_id) { + *action_id = local_action_id; + } + if (target_rc) { + *target_rc = local_target_rc; + } + return TRUE; +} + +// Return true if a is an attribute that should be filtered +static bool +should_filter_for_digest(xmlAttrPtr a, void *user_data) +{ + if (strncmp((const char *) a->name, CRM_META "_", + sizeof(CRM_META " ") - 1) == 0) { + return true; + } + return pcmk__str_any_of((const char *) a->name, + XML_ATTR_ID, + XML_ATTR_CRM_VERSION, + XML_LRM_ATTR_OP_DIGEST, + XML_LRM_ATTR_TARGET, + XML_LRM_ATTR_TARGET_UUID, + "pcmk_external_ip", + NULL); +} + +/*! 
+ * \internal + * \brief Remove XML attributes not needed for operation digest + * + * \param[in,out] param_set XML with operation parameters + */ +void +pcmk__filter_op_for_digest(xmlNode *param_set) +{ + char *key = NULL; + char *timeout = NULL; + guint interval_ms = 0; + + if (param_set == NULL) { + return; + } + + /* Timeout is useful for recurring operation digests, so grab it before + * removing meta-attributes + */ + key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); + if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) { + interval_ms = 0; + } + free(key); + key = NULL; + if (interval_ms != 0) { + key = crm_meta_name(XML_ATTR_TIMEOUT); + timeout = crm_element_value_copy(param_set, key); + } + + // Remove all CRM_meta_* attributes and certain other attributes + pcmk__xe_remove_matching_attrs(param_set, should_filter_for_digest, NULL); + + // Add timeout back for recurring operation digests + if (timeout != NULL) { + crm_xml_add(param_set, key, timeout); + } + free(timeout); + free(key); +} + +int +rsc_op_expected_rc(const lrmd_event_data_t *op) +{ + int rc = 0; + + if (op && op->user_data) { + decode_transition_key(op->user_data, NULL, NULL, NULL, &rc); + } + return rc; +} + +gboolean +did_rsc_op_fail(lrmd_event_data_t * op, int target_rc) +{ + switch (op->op_status) { + case PCMK_EXEC_CANCELLED: + case PCMK_EXEC_PENDING: + return FALSE; + + case PCMK_EXEC_NOT_SUPPORTED: + case PCMK_EXEC_TIMEOUT: + case PCMK_EXEC_ERROR: + case PCMK_EXEC_NOT_CONNECTED: + case PCMK_EXEC_NO_FENCE_DEVICE: + case PCMK_EXEC_NO_SECRETS: + case PCMK_EXEC_INVALID: + return TRUE; + + default: + if (target_rc != op->rc) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * \brief Create a CIB XML element for an operation + * + * \param[in,out] parent If not NULL, make new XML node a child of this + * \param[in] prefix Generate an ID using this prefix + * \param[in] task Operation task to set + * \param[in] interval_spec Operation interval to set + * \param[in] timeout If not NULL, operation timeout to set + * + * \return New XML object on success, NULL otherwise + */ +xmlNode * +crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, + const char *interval_spec, const char *timeout) +{ + xmlNode *xml_op; + + CRM_CHECK(prefix && task && interval_spec, return NULL); + + xml_op = create_xml_node(parent, XML_ATTR_OP); + crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval_spec); + crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval_spec); + crm_xml_add(xml_op, "name", task); + if (timeout) { + crm_xml_add(xml_op, XML_ATTR_TIMEOUT, timeout); + } + return xml_op; +} + +/*! + * \brief Check whether an operation requires resource agent meta-data + * + * \param[in] rsc_class Resource agent class (or NULL to skip class check) + * \param[in] op Operation action (or NULL to skip op check) + * + * \return true if operation needs meta-data, false otherwise + * \note At least one of rsc_class and op must be specified. + */ +bool +crm_op_needs_metadata(const char *rsc_class, const char *op) +{ + /* Agent metadata is used to determine whether an agent reload is possible, + * so if this op is not relevant to that feature, we don't need metadata. 
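
rsc_op_expected_rc() above works because transition keys encode ACTION:TRANSITION:TARGET_RC:UUID, with the node's 36-character UUID padded into the last field. A small round trip through pcmk__transition_key() and decode_transition_key() (the internal header location is an assumption):

    #include <stdio.h>
    #include <stdlib.h>
    #include <crm/common/util.h>       // decode_transition_key()
    #include <crm/common/internal.h>   // assumed home of pcmk__transition_key()

    int
    main(void)
    {
        char *uuid = NULL;
        int transition_id = 0;
        int action_id = 0;
        int target_rc = 0;

        // The node argument must be the node's 36-character UUID
        char *key = pcmk__transition_key(10, 3, 0,
                                         "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee");

        if (decode_transition_key(key, &uuid, &transition_id, &action_id,
                                  &target_rc)) {
            printf("transition %d, action %d, target rc %d, node %s\n",
                   transition_id, action_id, target_rc, uuid);
        }

        free(key);
        free(uuid);
        return 0;
    }
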
+ */ + + CRM_CHECK((rsc_class != NULL) || (op != NULL), return false); + + if ((rsc_class != NULL) + && !pcmk_is_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) { + // Metadata is needed only for resource classes that use parameters + return false; + } + if (op == NULL) { + return true; + } + + // Metadata is needed only for these actions + return pcmk__str_any_of(op, PCMK_ACTION_START, PCMK_ACTION_MONITOR, + PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, + PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, + PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, + PCMK_ACTION_NOTIFY, NULL); +} + +/*! + * \internal + * \brief Check whether an action name is for a fencing action + * + * \param[in] action Action name to check + * + * \return true if \p action is "off", "reboot", or "poweroff", otherwise false + */ +bool +pcmk__is_fencing_action(const char *action) +{ + return pcmk__str_any_of(action, PCMK_ACTION_OFF, PCMK_ACTION_REBOOT, + "poweroff", NULL); +} + +bool +pcmk_is_probe(const char *task, guint interval) +{ + if (task == NULL) { + return false; + } + + return (interval == 0) + && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none); +} + +bool +pcmk_xe_is_probe(const xmlNode *xml_op) +{ + const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); + const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); + int interval_ms; + + pcmk__scan_min_int(interval_ms_s, &interval_ms, 0); + return pcmk_is_probe(task, interval_ms); +} + +bool +pcmk_xe_mask_probe_failure(const xmlNode *xml_op) +{ + int status = PCMK_EXEC_UNKNOWN; + int rc = PCMK_OCF_OK; + + if (!pcmk_xe_is_probe(xml_op)) { + return false; + } + + crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status); + crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc); + + return rc == PCMK_OCF_NOT_INSTALLED || rc == PCMK_OCF_INVALID_PARAM || + status == PCMK_EXEC_NOT_INSTALLED; +} diff --git a/lib/common/alerts.c b/lib/common/alerts.c index abdadef..98b1e3f 100644 --- a/lib/common/alerts.c +++ b/lib/common/alerts.c @@ -1,5 +1,5 @@ /* - * Copyright 2015-2022 the Pacemaker project contributors + * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -12,8 +12,8 @@ #include #include #include +#include #include -#include /* for F_CIB_UPDATE_RESULT */ /* * to allow script compatibility we can have more than one @@ -168,86 +168,3 @@ pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name, g_hash_table_insert(table, strdup(*key), pcmk__itoa(value)); } } - -#define XPATH_PATCHSET1_DIFF "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED - -#define XPATH_PATCHSET1_CRMCONFIG XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_CRMCONFIG -#define XPATH_PATCHSET1_ALERTS XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_ALERTS - -#define XPATH_PATCHSET1_EITHER \ - XPATH_PATCHSET1_CRMCONFIG " | " XPATH_PATCHSET1_ALERTS - -#define XPATH_CONFIG "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION - -#define XPATH_CRMCONFIG XPATH_CONFIG "/" XML_CIB_TAG_CRMCONFIG "/" -#define XPATH_ALERTS XPATH_CONFIG "/" XML_CIB_TAG_ALERTS - -/*! 
- * \internal - * \brief Check whether a CIB update affects alerts - * - * \param[in] msg XML containing CIB update - * \param[in] config Whether to check for crmconfig change as well - * - * \return TRUE if update affects alerts, FALSE otherwise - */ -bool -pcmk__alert_in_patchset(xmlNode *msg, bool config) -{ - int rc = -1; - int format= 1; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); - xmlNode *change = NULL; - xmlXPathObject *xpathObj = NULL; - - CRM_CHECK(msg != NULL, return FALSE); - - crm_element_value_int(msg, F_CIB_RC, &rc); - if (rc < pcmk_ok) { - crm_trace("Ignore failed CIB update: %s (%d)", pcmk_strerror(rc), rc); - return FALSE; - } - - crm_element_value_int(patchset, "format", &format); - if (format == 1) { - const char *diff = (config? XPATH_PATCHSET1_EITHER : XPATH_PATCHSET1_ALERTS); - - if ((xpathObj = xpath_search(msg, diff)) != NULL) { - freeXpathObject(xpathObj); - return TRUE; - } - } else if (format == 2) { - for (change = pcmk__xml_first_child(patchset); change != NULL; - change = pcmk__xml_next(change)) { - const char *xpath = crm_element_value(change, XML_DIFF_PATH); - - if (xpath == NULL) { - continue; - } - - if ((!config || !strstr(xpath, XPATH_CRMCONFIG)) - && !strstr(xpath, XPATH_ALERTS)) { - - /* this is not a change to an existing section ... */ - - xmlNode *section = NULL; - const char *name = NULL; - - if ((strcmp(xpath, XPATH_CONFIG) != 0) || - ((section = pcmk__xml_first_child(change)) == NULL) || - ((name = crm_element_name(section)) == NULL) || - (strcmp(name, XML_CIB_TAG_ALERTS) != 0)) { - - /* ... nor is it a newly added alerts section */ - continue; - } - } - - return TRUE; - } - - } else { - crm_warn("Unknown patch format: %d", format); - } - return FALSE; -} diff --git a/lib/common/cib.c b/lib/common/cib.c index b84c5e8..fee7881 100644 --- a/lib/common/cib.c +++ b/lib/common/cib.c @@ -1,6 +1,6 @@ /* * Original copyright 2004 International Business Machines - * Later changes copyright 2008-2021 the Pacemaker project contributors + * Later changes copyright 2008-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,6 +14,8 @@ #include // xmlNode #include +#include +#include /* * Functions to help find particular sections of the CIB @@ -99,7 +101,7 @@ static struct { }; /*! - * \brief Get the XPath needed to find a specified CIB element name + * \brief Get the relative XPath needed to find a specified CIB element name * * \param[in] element_name Name of CIB element * @@ -119,6 +121,23 @@ pcmk_cib_xpath_for(const char *element_name) return NULL; } +/*! + * \internal + * \brief Get the absolute XPath needed to find a specified CIB element name + * + * \param[in] element Name of CIB element + * + * \return XPath for finding \p element in CIB XML (or \c NULL if unknown) + */ +const char * +pcmk__cib_abs_xpath_for(const char *element) +{ + const char *xpath = pcmk_cib_xpath_for(element); + + // XPaths returned by pcmk_cib_xpath_for() are relative (starting with "//") + return ((xpath != NULL)? (xpath + 1) : NULL); +} + /*! 
* \brief Get the parent element name of a given CIB element name * diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h index 7faccb6..121d663 100644 --- a/lib/common/crmcommon_private.h +++ b/lib/common/crmcommon_private.h @@ -63,7 +63,7 @@ typedef struct xml_doc_private_s { } while (0) G_GNUC_INTERNAL -void pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, +void pcmk__xml2text(const xmlNode *data, uint32_t options, GString *buffer, int depth); G_GNUC_INTERNAL @@ -116,12 +116,14 @@ G_GNUC_INTERNAL void pcmk__log_xmllib_err(void *ctx, const char *fmt, ...) G_GNUC_PRINTF(2, 3); -static inline const char * -pcmk__xml_attr_value(const xmlAttr *attr) -{ - return ((attr == NULL) || (attr->children == NULL))? NULL - : (const char *) attr->children->content; -} +G_GNUC_INTERNAL +void pcmk__mark_xml_node_dirty(xmlNode *xml); + +G_GNUC_INTERNAL +bool pcmk__marked_as_deleted(xmlAttrPtr a, void *user_data); + +G_GNUC_INTERNAL +void pcmk__dump_xml_attr(const xmlAttr *attr, GString *buffer); /* * IPC @@ -173,11 +175,11 @@ typedef struct pcmk__ipc_methods_s { * \brief Check whether an IPC request results in a reply * * \param[in,out] api IPC API connection - * \param[in,out] request IPC request XML + * \param[in] request IPC request XML * * \return true if request would result in an IPC reply, false otherwise */ - bool (*reply_expected)(pcmk_ipc_api_t *api, xmlNode *request); + bool (*reply_expected)(pcmk_ipc_api_t *api, const xmlNode *request); /*! * \internal @@ -222,7 +224,7 @@ typedef struct pcmk__ipc_header_s { } pcmk__ipc_header_t; G_GNUC_INTERNAL -int pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request); +int pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request); G_GNUC_INTERNAL void pcmk__call_ipc_callback(pcmk_ipc_api_t *api, @@ -264,47 +266,6 @@ pcmk__ipc_methods_t *pcmk__schedulerd_api_methods(void); //! XML has been moved #define PCMK__XML_PREFIX_MOVED "+~" -/*! - * \brief Check the authenticity of the IPC socket peer process - * - * If everything goes well, peer's authenticity is verified by the means - * of comparing against provided referential UID and GID (either satisfies), - * and the result of this check can be deduced from the return value. - * As an exception, detected UID of 0 ("root") satisfies arbitrary - * provided referential daemon's credentials. - * - * \param[in] qb_ipc libqb client connection if available - * \param[in] sock IPC related, connected Unix socket to check peer of - * \param[in] refuid referential UID to check against - * \param[in] refgid referential GID to check against - * \param[out] gotpid to optionally store obtained PID of the peer - * (not available on FreeBSD, special value of 1 - * used instead, and the caller is required to - * special case this value respectively) - * \param[out] gotuid to optionally store obtained UID of the peer - * \param[out] gotgid to optionally store obtained GID of the peer - * - * \return Standard Pacemaker return code - * ie: 0 if it the connection is authentic - * pcmk_rc_ipc_unauthorized if the connection is not authentic, - * standard errors. 
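A usage sketch for the pcmk__cib_abs_xpath_for() helper added in the cib.c hunk above: it shares pcmk_cib_xpath_for()'s lookup table and simply skips the leading '/' of the relative ("//cib/...") form. This is illustrative only and not part of the patch; the header choice and the "resources" section name used in the comment are assumptions about the usual libcrmcommon layout.

    #include <stdio.h>
    #include <crm_internal.h>          // assumed catch-all for internal declarations

    // Print both XPath forms for one CIB section name (e.g. "resources")
    static void
    show_cib_xpaths(const char *element)
    {
        const char *rel = pcmk_cib_xpath_for(element);        // relative, starts with "//"
        const char *abs = pcmk__cib_abs_xpath_for(element);   // same string minus one '/'

        printf("%s: relative=%s absolute=%s\n", element,
               (rel != NULL)? rel : "(unknown)",
               (abs != NULL)? abs : "(unknown)");
    }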
- * - * \note While this function is tolerant on what constitutes authorized - * IPC daemon process (its effective user matches UID=0 or \p refuid, - * or at least its group matches \p refgid), either or both (in case - * of UID=0) mismatches on the expected credentials of such peer - * process \e shall be investigated at the caller when value of 1 - * gets returned there, since higher-than-expected privileges in - * respect to the expected/intended credentials possibly violate - * the least privilege principle and may pose an additional risk - * (i.e. such accidental inconsistency shall be eventually fixed). - */ -int pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock, - uid_t refuid, gid_t refgid, - pid_t *gotpid, uid_t *gotuid, - gid_t *gotgid); - - /* * Output */ diff --git a/lib/common/digest.c b/lib/common/digest.c index 3bf04bf..4de6f97 100644 --- a/lib/common/digest.c +++ b/lib/common/digest.c @@ -1,5 +1,5 @@ /* - * Copyright 2015-2022 the Pacemaker project contributors + * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -89,7 +89,7 @@ calculate_xml_digest_v1(xmlNode *input, gboolean sort, gboolean ignored) * \return Newly allocated string containing digest */ static char * -calculate_xml_digest_v2(xmlNode *source, gboolean do_filter) +calculate_xml_digest_v2(const xmlNode *source, gboolean do_filter) { char *digest = NULL; GString *buffer = g_string_sized_new(1024); diff --git a/lib/common/io.c b/lib/common/io.c index 2264e16..35efbe9 100644 --- a/lib/common/io.c +++ b/lib/common/io.c @@ -460,11 +460,17 @@ pcmk__file_contents(const char *filename, char **contents) goto bail; } rewind(fp); - read_len = fread(*contents, 1, length, fp); /* Coverity: False positive */ + + read_len = fread(*contents, 1, length, fp); if (read_len != length) { free(*contents); *contents = NULL; rc = EIO; + } else { + /* Coverity thinks *contents isn't null-terminated. It doesn't + * understand calloc(). + */ + (*contents)[length] = '\0'; } } diff --git a/lib/common/ipc_attrd.c b/lib/common/ipc_attrd.c index 7c40aa7..9caaabe 100644 --- a/lib/common/ipc_attrd.c +++ b/lib/common/ipc_attrd.c @@ -44,7 +44,7 @@ set_pairs_data(pcmk__attrd_api_reply_t *data, xmlNode *msg_data) } static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) +reply_expected(pcmk_ipc_api_t *api, const xmlNode *request) { const char *command = crm_element_value(request, PCMK__XA_TASK); @@ -169,32 +169,29 @@ destroy_api(pcmk_ipc_api_t *api) } static int -connect_and_send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request) +connect_and_send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request) { int rc = pcmk_rc_ok; - int max = 5; - - while (max > 0) { - crm_info("Connecting to cluster... 
%d retries remaining", max); - rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync); - - if (rc == pcmk_rc_ok) { - rc = pcmk__send_ipc_request(api, request); - break; - } else if (rc == EAGAIN || rc == EALREADY) { - sleep(5 - max); - max--; - } else { - crm_err("Could not connect to attrd: %s", pcmk_rc_str(rc)); - break; - } + + rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5); + if (rc != pcmk_rc_ok) { + crm_err("Could not connect to %s: %s", + pcmk_ipc_name(api, true), pcmk_rc_str(rc)); + return rc; } - return rc; + rc = pcmk__send_ipc_request(api, request); + if (rc != pcmk_rc_ok) { + crm_err("Could not send request to %s: %s", + pcmk_ipc_name(api, true), pcmk_rc_str(rc)); + return rc; + } + + return pcmk_rc_ok; } static int -send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request) +send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request) { return pcmk__send_ipc_request(api, request); } diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c index c6d1645..0d38650 100644 --- a/lib/common/ipc_client.c +++ b/lib/common/ipc_client.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -31,6 +31,10 @@ #include #include "crmcommon_private.h" +static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock, + uid_t refuid, gid_t refgid, pid_t *gotpid, + uid_t *gotuid, gid_t *gotgid); + /*! * \brief Create a new object for using Pacemaker daemon IPC * @@ -164,7 +168,7 @@ ipc_post_disconnect(gpointer user_data) { pcmk_ipc_api_t *api = user_data; - crm_info("Disconnected from %s IPC API", pcmk_ipc_name(api, true)); + crm_info("Disconnected from %s", pcmk_ipc_name(api, true)); // Perform any daemon-specific handling needed if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) { @@ -389,7 +393,7 @@ dispatch_ipc_source_data(const char *buffer, ssize_t length, gpointer user_data) * meaning no data is available; all other values indicate errors. * \todo This does not allow the caller to poll multiple file descriptors at * once. If there is demand for that, we could add a wrapper for - * crm_ipc_get_fd(api->ipc), so the caller can call poll() themselves. + * pcmk__ipc_fd(api->ipc), so the caller can call poll() themselves. */ int pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms) @@ -400,7 +404,14 @@ pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms) if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) { return EINVAL; } - pollfd.fd = crm_ipc_get_fd(api->ipc); + + rc = pcmk__ipc_fd(api->ipc, &(pollfd.fd)); + if (rc != pcmk_rc_ok) { + crm_debug("Could not obtain file descriptor for %s IPC: %s", + pcmk_ipc_name(api, true), pcmk_rc_str(rc)); + return rc; + } + pollfd.events = POLLIN; rc = poll(&pollfd, 1, timeout_ms); if (rc < 0) { @@ -465,54 +476,54 @@ connect_with_main_loop(pcmk_ipc_api_t *api) static int connect_without_main_loop(pcmk_ipc_api_t *api) { - int rc; + int rc = pcmk__connect_generic_ipc(api->ipc); - if (!crm_ipc_connect(api->ipc)) { - rc = errno; + if (rc != pcmk_rc_ok) { crm_ipc_close(api->ipc); - return rc; + } else { + crm_debug("Connected to %s IPC (without main loop)", + pcmk_ipc_name(api, true)); } - crm_debug("Connected to %s IPC (without main loop)", - pcmk_ipc_name(api, true)); - return pcmk_rc_ok; + return rc; } /*! 
- * \brief Connect to a Pacemaker daemon via IPC + * \internal + * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors) * * \param[in,out] api IPC API instance * \param[in] dispatch_type How IPC replies should be dispatched + * \param[in] attempts How many times to try (in case of soft error) * * \return Standard Pacemaker return code */ int -pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) +pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type, + int attempts) { - const int n_attempts = 2; int rc = pcmk_rc_ok; - if (api == NULL) { - crm_err("Cannot connect to uninitialized API object"); + if ((api == NULL) || (attempts < 1)) { return EINVAL; } if (api->ipc == NULL) { - api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), - api->ipc_size_max); + api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), api->ipc_size_max); if (api->ipc == NULL) { - crm_err("Failed to re-create IPC API"); return ENOMEM; } } if (crm_ipc_connected(api->ipc)) { - crm_trace("Already connected to %s IPC API", pcmk_ipc_name(api, true)); + crm_trace("Already connected to %s", pcmk_ipc_name(api, true)); return pcmk_rc_ok; } api->dispatch_type = dispatch_type; - for (int i = 0; i < n_attempts; i++) { + crm_debug("Attempting connection to %s (up to %d time%s)", + pcmk_ipc_name(api, true), attempts, pcmk__plural_s(attempts)); + for (int remaining = attempts - 1; remaining >= 0; --remaining) { switch (dispatch_type) { case pcmk_ipc_dispatch_main: rc = connect_with_main_loop(api); @@ -524,17 +535,15 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) break; } - if (rc != EAGAIN) { - break; + if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) { + break; // Result is final } - /* EAGAIN may occur due to interruption by a signal or due to some - * transient issue. Try one more time to be more resilient. - */ - if (i < (n_attempts - 1)) { - crm_trace("Connection to %s IPC API failed with EAGAIN, retrying", - pcmk_ipc_name(api, true)); - } + // Retry after soft error (interrupted by signal, etc.) + pcmk__sleep_ms((attempts - remaining) * 500); + crm_debug("Re-attempting connection to %s (%d attempt%s remaining)", + pcmk_ipc_name(api, true), remaining, + pcmk__plural_s(remaining)); } if (rc != pcmk_rc_ok) { @@ -550,6 +559,26 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) return rc; } +/*! + * \brief Connect to a Pacemaker daemon via IPC + * + * \param[in,out] api IPC API instance + * \param[in] dispatch_type How IPC replies should be dispatched + * + * \return Standard Pacemaker return code + */ +int +pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) +{ + int rc = pcmk__connect_ipc(api, dispatch_type, 2); + + if (rc != pcmk_rc_ok) { + crm_err("Connection to %s failed: %s", + pcmk_ipc_name(api, true), pcmk_rc_str(rc)); + } + return rc; +} + /*! * \brief Disconnect an IPC API instance * @@ -628,7 +657,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb, * \brief Send an XML request across an IPC API connection * * \param[in,out] api IPC API connection - * \param[in,out] request XML request to send + * \param[in] request XML request to send * * \return Standard Pacemaker return code * @@ -636,7 +665,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb, * requests, because it handles different dispatch types appropriately. 
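The connection logic above now retries only after soft errors (EAGAIN or EALREADY), sleeping 500 ms times the number of attempts made so far, and pcmk_connect_ipc() becomes a thin two-attempt wrapper around the internal pcmk__connect_ipc(). A caller-side sketch, assuming pcmk_new_ipc_api(), pcmk_free_ipc_api() and the pcmk_ipc_controld enumerator keep their usual public signatures (none of them appear in this hunk):

    #include <crm/common/ipc.h>            // public IPC API (assumed header)
    #include <crm/common/ipc_internal.h>   // pcmk__connect_ipc() (assumed header)

    static int
    connect_controller_with_retries(pcmk_ipc_api_t **api)
    {
        int rc = pcmk_new_ipc_api(api, pcmk_ipc_controld);   // assumed constructor

        if (rc != pcmk_rc_ok) {
            return rc;
        }

        /* Up to five attempts; only EAGAIN/EALREADY are retried, mirroring the
         * attrd client shown earlier in this patch.
         */
        rc = pcmk__connect_ipc(*api, pcmk_ipc_dispatch_sync, 5);
        if (rc != pcmk_rc_ok) {
            pcmk_free_ipc_api(*api);
            *api = NULL;
        }
        return rc;
    }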
*/ int -pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request) +pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request) { int rc; xmlNode *reply = NULL; @@ -854,6 +883,77 @@ crm_ipc_new(const char *name, size_t max_size) return client; } +/*! + * \internal + * \brief Connect a generic (not daemon-specific) IPC object + * + * \param[in,out] ipc Generic IPC object to connect + * + * \return Standard Pacemaker return code + */ +int +pcmk__connect_generic_ipc(crm_ipc_t *ipc) +{ + uid_t cl_uid = 0; + gid_t cl_gid = 0; + pid_t found_pid = 0; + uid_t found_uid = 0; + gid_t found_gid = 0; + int rc = pcmk_rc_ok; + + if (ipc == NULL) { + return EINVAL; + } + + ipc->need_reply = FALSE; + ipc->ipc = qb_ipcc_connect(ipc->server_name, ipc->buf_size); + if (ipc->ipc == NULL) { + return errno; + } + + rc = qb_ipcc_fd_get(ipc->ipc, &ipc->pfd.fd); + if (rc < 0) { // -errno + crm_ipc_close(ipc); + return -rc; + } + + rc = pcmk_daemon_user(&cl_uid, &cl_gid); + rc = pcmk_legacy2rc(rc); + if (rc != pcmk_rc_ok) { + crm_ipc_close(ipc); + return rc; + } + + rc = is_ipc_provider_expected(ipc->ipc, ipc->pfd.fd, cl_uid, cl_gid, + &found_pid, &found_uid, &found_gid); + if (rc != pcmk_rc_ok) { + if (rc == pcmk_rc_ipc_unauthorized) { + crm_info("%s IPC provider authentication failed: process %lld has " + "uid %lld (expected %lld) and gid %lld (expected %lld)", + ipc->server_name, + (long long) PCMK__SPECIAL_PID_AS_0(found_pid), + (long long) found_uid, (long long) cl_uid, + (long long) found_gid, (long long) cl_gid); + } + crm_ipc_close(ipc); + return rc; + } + + ipc->max_buf_size = qb_ipcc_get_buffer_size(ipc->ipc); + if (ipc->max_buf_size > ipc->buf_size) { + free(ipc->buffer); + ipc->buffer = calloc(ipc->max_buf_size, sizeof(char)); + if (ipc->buffer == NULL) { + rc = errno; + crm_ipc_close(ipc); + return rc; + } + ipc->buf_size = ipc->max_buf_size; + } + + return pcmk_rc_ok; +} + /*! * \brief Establish an IPC connection to a Pacemaker component * @@ -866,76 +966,26 @@ crm_ipc_new(const char *name, size_t max_size) bool crm_ipc_connect(crm_ipc_t *client) { - uid_t cl_uid = 0; - gid_t cl_gid = 0; - pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; - int rv; + int rc = pcmk__connect_generic_ipc(client); - if (client == NULL) { - errno = EINVAL; - return false; + if (rc == pcmk_rc_ok) { + return true; } - - client->need_reply = FALSE; - client->ipc = qb_ipcc_connect(client->server_name, client->buf_size); - - if (client->ipc == NULL) { + if ((client != NULL) && (client->ipc == NULL)) { + errno = (rc > 0)? 
rc : ENOTCONN; crm_debug("Could not establish %s IPC connection: %s (%d)", client->server_name, pcmk_rc_str(errno), errno); - return false; - } - - client->pfd.fd = crm_ipc_get_fd(client); - if (client->pfd.fd < 0) { - rv = errno; - /* message already omitted */ - crm_ipc_close(client); - errno = rv; - return false; - } - - rv = pcmk_daemon_user(&cl_uid, &cl_gid); - if (rv < 0) { - /* message already omitted */ - crm_ipc_close(client); - errno = -rv; - return false; - } - - if ((rv = pcmk__crm_ipc_is_authentic_process(client->ipc, client->pfd.fd, cl_uid, cl_gid, - &found_pid, &found_uid, - &found_gid)) == pcmk_rc_ipc_unauthorized) { - crm_err("%s IPC provider authentication failed: process %lld has " - "uid %lld (expected %lld) and gid %lld (expected %lld)", - client->server_name, - (long long) PCMK__SPECIAL_PID_AS_0(found_pid), - (long long) found_uid, (long long) cl_uid, - (long long) found_gid, (long long) cl_gid); - crm_ipc_close(client); + } else if (rc == pcmk_rc_ipc_unauthorized) { + crm_err("%s IPC provider authentication failed", + (client == NULL)? "Pacemaker" : client->server_name); errno = ECONNABORTED; - return false; - - } else if (rv != pcmk_rc_ok) { - crm_perror(LOG_ERR, "Could not verify authenticity of %s IPC provider", - client->server_name); - crm_ipc_close(client); - if (rv > 0) { - errno = rv; - } else { - errno = ENOTCONN; - } - return false; - } - - qb_ipcc_context_set(client->ipc, client); - - client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); - if (client->max_buf_size > client->buf_size) { - free(client->buffer); - client->buffer = calloc(1, client->max_buf_size); - client->buf_size = client->max_buf_size; + } else { + crm_perror(LOG_ERR, + "Could not verify authenticity of %s IPC provider", + (client == NULL)? "Pacemaker" : client->server_name); + errno = ENOTCONN; } - return true; + return false; } void @@ -977,18 +1027,40 @@ crm_ipc_destroy(crm_ipc_t * client) } } +/*! + * \internal + * \brief Get the file descriptor for a generic IPC object + * + * \param[in,out] ipc Generic IPC object to get file descriptor for + * \param[out] fd Where to store file descriptor + * + * \return Standard Pacemaker return code + */ +int +pcmk__ipc_fd(crm_ipc_t *ipc, int *fd) +{ + if ((ipc == NULL) || (fd == NULL)) { + return EINVAL; + } + if ((ipc->ipc == NULL) || (ipc->pfd.fd < 0)) { + return ENOTCONN; + } + *fd = ipc->pfd.fd; + return pcmk_rc_ok; +} + int crm_ipc_get_fd(crm_ipc_t * client) { - int fd = 0; + int fd = -1; - if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { - return fd; + if (pcmk__ipc_fd(client, &fd) != pcmk_rc_ok) { + crm_err("Could not obtain file descriptor for %s IPC", + ((client == NULL)? "unspecified" : client->server_name)); + errno = EINVAL; + return -EINVAL; } - errno = EINVAL; - crm_perror(LOG_ERR, "Could not obtain file descriptor for %s IPC", - (client? 
client->server_name : "unspecified")); - return -errno; + return fd; } bool @@ -1057,12 +1129,13 @@ crm_ipc_decompress(crm_ipc_t * client) rc = BZ2_bzBuffToBuffDecompress(uncompressed + sizeof(pcmk__ipc_header_t), &size_u, client->buffer + sizeof(pcmk__ipc_header_t), header->size_compressed, 1, 0); + rc = pcmk__bzlib2rc(rc); - if (rc != BZ_OK) { - crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", - bz2_strerror(rc), rc); + if (rc != pcmk_rc_ok) { + crm_err("Decompression failed: %s " CRM_XS " rc=%d", + pcmk_rc_str(rc), rc); free(uncompressed); - return EILSEQ; + return rc; } /* @@ -1221,7 +1294,7 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, * \brief Send an IPC XML message * * \param[in,out] client Connection to IPC server - * \param[in,out] message XML message to send + * \param[in] message XML message to send * \param[in] flags Bitmask of crm_ipc_flags * \param[in] ms_timeout Give up if not sent within this much time * (5 seconds if 0, or no timeout if negative) @@ -1231,8 +1304,8 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, * if reply was needed, otherwise number of bytes sent */ int -crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, - xmlNode ** reply) +crm_ipc_send(crm_ipc_t *client, const xmlNode *message, + enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply) { int rc = 0; ssize_t qb_rc = 0; @@ -1385,89 +1458,129 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in return rc; } -int -pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid, - pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) +/*! + * \brief Ensure an IPC provider has expected user or group + * + * \param[in] qb_ipc libqb client connection if available + * \param[in] sock Connected Unix socket for IPC + * \param[in] refuid Expected user ID + * \param[in] refgid Expected group ID + * \param[out] gotpid If not NULL, where to store provider's actual process ID + * (or 1 on platforms where ID is not available) + * \param[out] gotuid If not NULL, where to store provider's actual user ID + * \param[out] gotgid If not NULL, where to store provider's actual group ID + * + * \return Standard Pacemaker return code + * \note An actual user ID of 0 (root) will always be considered authorized, + * regardless of the expected values provided. The caller can use the + * output arguments to be stricter than this function. 
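With this change crm_ipc_get_fd() reports failure as -EINVAL instead of quietly handing back descriptor 0, and internal callers are expected to use pcmk__ipc_fd(), which returns a standard Pacemaker code. A small sketch of the poll-style pattern on an already-connected crm_ipc_t (header choices are assumptions; error handling is reduced to the essentials):

    #include <errno.h>
    #include <poll.h>
    #include <crm/common/ipc.h>        // crm_ipc_t (assumed header)
    #include <crm/common/results.h>    // pcmk_rc_ok
    #include "crmcommon_private.h"     // pcmk__ipc_fd() (assumed declaration site)

    // Wait up to timeout_ms for readable data on a connected IPC client
    static int
    wait_for_ipc_data(crm_ipc_t *ipc, int timeout_ms)
    {
        struct pollfd pfd = { .fd = -1, .events = POLLIN, .revents = 0 };
        int rc = pcmk__ipc_fd(ipc, &(pfd.fd));   // EINVAL or ENOTCONN on failure

        if (rc != pcmk_rc_ok) {
            return rc;
        }
        switch (poll(&pfd, 1, timeout_ms)) {
            case -1:
                return errno;      // poll() itself failed
            case 0:
                return EAGAIN;     // timed out with nothing pending
            default:
                return pcmk_rc_ok; // data (or hangup) is ready to dispatch
        }
    }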
+ */ +static int +is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock, + uid_t refuid, gid_t refgid, + pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { - int ret = 0; - pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; -#if defined(HAVE_UCRED) - struct ucred ucred; - socklen_t ucred_len = sizeof(ucred); -#endif + int rc = EOPNOTSUPP; + pid_t found_pid = 0; + uid_t found_uid = 0; + gid_t found_gid = 0; #ifdef HAVE_QB_IPCC_AUTH_GET - if (qb_ipc && !qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid)) { - goto do_checks; + if (qb_ipc != NULL) { + rc = qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid); + rc = -rc; // libqb returns 0 or -errno + if (rc == pcmk_rc_ok) { + goto found; + } } #endif -#if defined(HAVE_UCRED) - if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, - &ucred, &ucred_len) - && ucred_len == sizeof(ucred)) { - found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid; +#ifdef HAVE_UCRED + { + struct ucred ucred; + socklen_t ucred_len = sizeof(ucred); -#elif defined(HAVE_SOCKPEERCRED) - struct sockpeercred sockpeercred; - socklen_t sockpeercred_len = sizeof(sockpeercred); - - if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, - &sockpeercred, &sockpeercred_len) - && sockpeercred_len == sizeof(sockpeercred_len)) { - found_pid = sockpeercred.pid; - found_uid = sockpeercred.uid; found_gid = sockpeercred.gid; + if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) < 0) { + rc = errno; + } else if (ucred_len != sizeof(ucred)) { + rc = EOPNOTSUPP; + } else { + found_pid = ucred.pid; + found_uid = ucred.uid; + found_gid = ucred.gid; + goto found; + } + } +#endif -#elif defined(HAVE_GETPEEREID) - if (!getpeereid(sock, &found_uid, &found_gid)) { - found_pid = PCMK__SPECIAL_PID; /* cannot obtain PID (FreeBSD) */ +#ifdef HAVE_SOCKPEERCRED + { + struct sockpeercred sockpeercred; + socklen_t sockpeercred_len = sizeof(sockpeercred); -#elif defined(HAVE_GETPEERUCRED) - ucred_t *ucred; - if (!getpeerucred(sock, &ucred)) { - errno = 0; - found_pid = ucred_getpid(ucred); - found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred); - ret = -errno; - ucred_free(ucred); - if (ret) { - return (ret < 0) ? ret : -pcmk_err_generic; + if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, + &sockpeercred, &sockpeercred_len) < 0) { + rc = errno; + } else if (sockpeercred_len != sizeof(sockpeercred)) { + rc = EOPNOTSUPP; + } else { + found_pid = sockpeercred.pid; + found_uid = sockpeercred.uid; + found_gid = sockpeercred.gid; + goto found; } - -#else -# error "No way to authenticate a Unix socket peer" - errno = 0; - if (0) { + } #endif -#ifdef HAVE_QB_IPCC_AUTH_GET - do_checks: + +#ifdef HAVE_GETPEEREID // For example, FreeBSD + if (getpeereid(sock, &found_uid, &found_gid) < 0) { + rc = errno; + } else { + found_pid = PCMK__SPECIAL_PID; + goto found; + } #endif - if (gotpid != NULL) { - *gotpid = found_pid; - } - if (gotuid != NULL) { - *gotuid = found_uid; - } - if (gotgid != NULL) { - *gotgid = found_gid; - } - if (found_uid == 0 || found_uid == refuid || found_gid == refgid) { - ret = 0; + +#ifdef HAVE_GETPEERUCRED + { + ucred_t *ucred = NULL; + + if (getpeerucred(sock, &ucred) < 0) { + rc = errno; } else { - ret = pcmk_rc_ipc_unauthorized; + found_pid = ucred_getpid(ucred); + found_uid = ucred_geteuid(ucred); + found_gid = ucred_getegid(ucred); + ucred_free(ucred); + goto found; } - } else { - ret = (errno > 0) ? 
errno : pcmk_rc_error; } - return ret; +#endif + + return rc; // If we get here, nothing succeeded + +found: + if (gotpid != NULL) { + *gotpid = found_pid; + } + if (gotuid != NULL) { + *gotuid = found_uid; + } + if (gotgid != NULL) { + *gotgid = found_gid; + } + if ((found_uid != 0) && (found_uid != refuid) && (found_gid != refgid)) { + return pcmk_rc_ipc_unauthorized; + } + return pcmk_rc_ok; } int crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { - int ret = pcmk__crm_ipc_is_authentic_process(NULL, sock, refuid, refgid, - gotpid, gotuid, gotgid); + int ret = is_ipc_provider_expected(NULL, sock, refuid, refgid, + gotpid, gotuid, gotgid); /* The old function had some very odd return codes*/ if (ret == 0) { @@ -1528,8 +1641,8 @@ pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, goto bail; } - auth_rc = pcmk__crm_ipc_is_authentic_process(c, fd, refuid, refgid, &found_pid, - &found_uid, &found_gid); + auth_rc = is_ipc_provider_expected(c, fd, refuid, refgid, + &found_pid, &found_uid, &found_gid); if (auth_rc == pcmk_rc_ipc_unauthorized) { crm_err("Daemon (IPC %s) effectively blocked with unauthorized" " process %lld (uid: %lld, gid: %lld)", diff --git a/lib/common/ipc_common.c b/lib/common/ipc_common.c index d0c0636..a48b0e9 100644 --- a/lib/common/ipc_common.c +++ b/lib/common/ipc_common.c @@ -35,7 +35,7 @@ pcmk__ipc_buffer_size(unsigned int max) if (global_max == 0) { long long global_ll; - if ((pcmk__scan_ll(getenv("PCMK_ipc_buffer"), &global_ll, + if ((pcmk__scan_ll(pcmk__env_option(PCMK__ENV_IPC_BUFFER), &global_ll, 0LL) != pcmk_rc_ok) || (global_ll <= 0)) { global_max = MAX_MSG_SIZE; // Default for unset or invalid diff --git a/lib/common/ipc_controld.c b/lib/common/ipc_controld.c index 9303afd..8e2016e 100644 --- a/lib/common/ipc_controld.c +++ b/lib/common/ipc_controld.c @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the Pacemaker project contributors + * Copyright 2020-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
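The consolidated is_ipc_provider_expected() above tries qb_ipcc_auth_get() first and then falls through the per-platform socket options in order; the rewritten sockpeercred branch also compares the returned length against the structure size rather than the socklen_t variable, fixing the old comparison. For readers unfamiliar with the Linux branch, this is the SO_PEERCRED pattern on its own, written only with standard system calls (a standalone illustration, not Pacemaker code):

    #define _GNU_SOURCE            // struct ucred needs this with glibc
    #include <errno.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Fetch the peer's PID, UID and GID from a connected Unix socket.
     * Returns 0 on success or an errno-style code on failure (Linux only).
     */
    static int
    unix_peer_credentials(int sock, pid_t *pid, uid_t *uid, gid_t *gid)
    {
        struct ucred peer;
        socklen_t len = sizeof(peer);

        memset(&peer, 0, sizeof(peer));
        if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0) {
            return errno;
        }
        if (len != sizeof(peer)) {
            return EOPNOTSUPP;     // unexpected size: treat as unsupported
        }
        *pid = peer.pid;
        *uid = peer.uid;
        *gid = peer.gid;
        return 0;
    }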
* @@ -135,7 +135,7 @@ set_node_info_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) data->data.node_info.uuid = crm_element_value(msg_data, XML_ATTR_ID); data->data.node_info.uname = crm_element_value(msg_data, XML_ATTR_UNAME); - data->data.node_info.state = crm_element_value(msg_data, XML_NODE_IS_PEER); + data->data.node_info.state = crm_element_value(msg_data, PCMK__XA_CRMD); } static void @@ -169,26 +169,24 @@ set_nodes_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) node_info->id = id_ll; } node_info->uname = crm_element_value(node, XML_ATTR_UNAME); - node_info->state = crm_element_value(node, XML_NODE_IN_CLUSTER); + node_info->state = crm_element_value(node, PCMK__XA_IN_CCM); data->data.nodes = g_list_prepend(data->data.nodes, node_info); } } static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) +reply_expected(pcmk_ipc_api_t *api, const xmlNode *request) { - const char *command = crm_element_value(request, F_CRM_TASK); - - if (command == NULL) { - return false; - } - - // We only need to handle commands that functions in this file can send - return !strcmp(command, CRM_OP_REPROBE) - || !strcmp(command, CRM_OP_NODE_INFO) - || !strcmp(command, CRM_OP_PING) - || !strcmp(command, CRM_OP_LRM_FAIL) - || !strcmp(command, CRM_OP_LRM_DELETE); + // We only need to handle commands that API functions can send + return pcmk__str_any_of(crm_element_value(request, F_CRM_TASK), + PCMK__CONTROLD_CMD_NODES, + CRM_OP_LRM_DELETE, + CRM_OP_LRM_FAIL, + CRM_OP_NODE_INFO, + CRM_OP_PING, + CRM_OP_REPROBE, + CRM_OP_RM_NODE_CACHE, + NULL); } static bool @@ -202,22 +200,12 @@ dispatch(pcmk_ipc_api_t *api, xmlNode *reply) pcmk_controld_reply_unknown, NULL, NULL, }; - /* If we got an ACK, return true so the caller knows to expect more responses - * from the IPC server. We do this before decrementing replies_expected because - * ACKs are not going to be included in that value. - * - * Note that we cannot do the same kind of status checking here that we do in - * ipc_pacemakerd.c. The ACK message we receive does not necessarily contain - * a status attribute. That is, we may receive this: - * - * - * - * Instead of this: - * - * - */ - if (pcmk__str_eq(crm_element_name(reply), "ack", pcmk__str_none)) { - return true; // More replies needed + if (pcmk__xe_is(reply, "ack")) { + /* ACKs are trivial responses that do not count toward expected replies, + * and do not have all the fields that validation requires, so skip that + * processing. 
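reply_expected() above collapses to a single pcmk__str_any_of() call; since that helper treats a NULL subject as matching nothing, the explicit NULL check on the task value could be dropped. A minimal predicate in the same style (the command set here is only an example; CRM_OP_PING and CRM_OP_NODE_INFO are taken from the list above, and the include is an assumed catch-all):

    #include <stdbool.h>
    #include <crm_internal.h>   // pcmk__str_any_of(), CRM_OP_* (assumed catch-all header)

    // True only for commands this hypothetical client expects a reply to
    static bool
    command_gets_reply(const char *command)
    {
        // A NULL command (missing task attribute) safely yields false
        return pcmk__str_any_of(command, CRM_OP_PING, CRM_OP_NODE_INFO, NULL);
    }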
+ */ + return private->replies_expected > 0; } if (private->replies_expected > 0) { @@ -341,21 +329,18 @@ create_controller_request(const pcmk_ipc_api_t *api, const char *op, // \return Standard Pacemaker return code static int -send_controller_request(pcmk_ipc_api_t *api, xmlNode *request, +send_controller_request(pcmk_ipc_api_t *api, const xmlNode *request, bool reply_is_expected) { - int rc; - if (crm_element_value(request, XML_ATTR_REFERENCE) == NULL) { return EINVAL; } - rc = pcmk__send_ipc_request(api, request); - if ((rc == pcmk_rc_ok) && reply_is_expected) { + if (reply_is_expected) { struct controld_api_private_s *private = api->api_data; private->replies_expected++; } - return rc; + return pcmk__send_ipc_request(api, request); } static xmlNode * diff --git a/lib/common/ipc_pacemakerd.c b/lib/common/ipc_pacemakerd.c index 91a3143..2f03709 100644 --- a/lib/common/ipc_pacemakerd.c +++ b/lib/common/ipc_pacemakerd.c @@ -1,5 +1,5 @@ /* - * Copyright 2020-2022 the Pacemaker project contributors + * Copyright 2020-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -178,7 +178,7 @@ post_disconnect(pcmk_ipc_api_t *api) } static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) +reply_expected(pcmk_ipc_api_t *api, const xmlNode *request) { const char *command = crm_element_value(request, F_CRM_TASK); diff --git a/lib/common/ipc_schedulerd.c b/lib/common/ipc_schedulerd.c index c1b81a4..cf788e5 100644 --- a/lib/common/ipc_schedulerd.c +++ b/lib/common/ipc_schedulerd.c @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the Pacemaker project contributors + * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -62,7 +62,7 @@ post_connect(pcmk_ipc_api_t *api) } static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) +reply_expected(pcmk_ipc_api_t *api, const xmlNode *request) { const char *command = crm_element_value(request, F_CRM_TASK); diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c index 60f20fb..5cd7e70 100644 --- a/lib/common/ipc_server.c +++ b/lib/common/ipc_server.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -421,9 +421,11 @@ pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id, rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); text = uncompressed; - if (rc != BZ_OK) { - crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", - bz2_strerror(rc), rc); + rc = pcmk__bzlib2rc(rc); + + if (rc != pcmk_rc_ok) { + crm_err("Decompression failed: %s " CRM_XS " rc=%d", + pcmk_rc_str(rc), rc); free(uncompressed); return NULL; } @@ -568,16 +570,16 @@ crm_ipcs_flush_events(pcmk__client_t *c) * \internal * \brief Create an I/O vector for sending an IPC XML message * - * \param[in] request Identifier for libqb response header - * \param[in,out] message XML message to send - * \param[in] max_send_size If 0, default IPC buffer size is used - * \param[out] result Where to store prepared I/O vector - * \param[out] bytes Size of prepared data in bytes + * \param[in] request Identifier for libqb response header + * \param[in] message XML message to send + * \param[in] max_send_size If 0, default IPC buffer size is used + * \param[out] result Where to store prepared I/O vector + * \param[out] bytes Size of prepared data in bytes * * \return Standard Pacemaker return code */ int -pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, +pcmk__ipc_prepare_iov(uint32_t request, const xmlNode *message, uint32_t max_send_size, struct iovec **result, ssize_t *bytes) { @@ -741,7 +743,7 @@ pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags) } int -pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message, +pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, const xmlNode *message, uint32_t flags) { struct iovec *iov = NULL; @@ -819,6 +821,7 @@ pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, if (ack != NULL) { crm_trace("Ack'ing IPC message from client %s as <%s status=%d>", pcmk__client_name(c), tag, status); + crm_log_xml_trace(ack, "sent-ack"); c->request_id = 0; rc = pcmk__ipc_send_xml(c, request, ack, flags); free_xml(ack); @@ -995,14 +998,17 @@ pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb) bool crm_is_daemon_name(const char *name) { - name = pcmk__message_name(name); - return (!strcmp(name, CRM_SYSTEM_CRMD) - || !strcmp(name, CRM_SYSTEM_STONITHD) - || !strcmp(name, "stonith-ng") - || !strcmp(name, "attrd") - || !strcmp(name, CRM_SYSTEM_CIB) - || !strcmp(name, CRM_SYSTEM_MCP) - || !strcmp(name, CRM_SYSTEM_DC) - || !strcmp(name, CRM_SYSTEM_TENGINE) - || !strcmp(name, CRM_SYSTEM_LRMD)); + return pcmk__str_any_of(pcmk__message_name(name), + "attrd", + CRM_SYSTEM_CIB, + CRM_SYSTEM_CRMD, + CRM_SYSTEM_DC, + CRM_SYSTEM_LRMD, + CRM_SYSTEM_MCP, + CRM_SYSTEM_PENGINE, + CRM_SYSTEM_STONITHD, + CRM_SYSTEM_TENGINE, + "pacemaker-remoted", + "stonith-ng", + NULL); } diff --git a/lib/common/iso8601.c b/lib/common/iso8601.c index 3e000e1..9de018f 100644 --- a/lib/common/iso8601.c +++ b/lib/common/iso8601.c @@ -1930,9 +1930,10 @@ pcmk__readable_interval(guint interval_ms) #define MS_IN_H (MS_IN_M * 60) #define MS_IN_D (MS_IN_H * 24) #define MAXSTR sizeof("..d..h..m..s...ms") - static char str[MAXSTR] = { '\0', }; + static char str[MAXSTR]; int offset = 0; + str[0] = '\0'; if (interval_ms > MS_IN_D) { offset += snprintf(str + offset, MAXSTR - offset, "%ud", interval_ms / MS_IN_D); diff --git a/lib/common/logging.c b/lib/common/logging.c index dded873..7768c35 100644 --- a/lib/common/logging.c +++ b/lib/common/logging.c @@ -51,6 +51,11 @@ static unsigned int crm_log_priority = LOG_NOTICE; static 
GLogFunc glib_log_default = NULL; static pcmk__output_t *logger_out = NULL; +pcmk__config_error_func pcmk__config_error_handler = NULL; +pcmk__config_warning_func pcmk__config_warning_handler = NULL; +void *pcmk__config_error_context = NULL; +void *pcmk__config_warning_context = NULL; + static gboolean crm_tracing_enabled(void); static void @@ -237,7 +242,7 @@ chown_logfile(const char *filename, int logfd) static void chmod_logfile(const char *filename, int logfd) { - const char *modestr = getenv("PCMK_logfile_mode"); + const char *modestr = pcmk__env_option(PCMK__ENV_LOGFILE_MODE); mode_t filemode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; if (modestr != NULL) { @@ -297,7 +302,7 @@ setenv_logfile(const char *filename) { // Some resource agents will log only if environment variable is set if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) { - pcmk__set_env_option(PCMK__ENV_LOGFILE, filename); + pcmk__set_env_option(PCMK__ENV_LOGFILE, filename, true); } } @@ -609,6 +614,20 @@ crm_log_filter_source(int source, const char *trace_files, const char *trace_fns } } +#ifndef HAVE_STRCHRNUL +/* strchrnul() is a GNU extension. If not present, use our own definition. + * The GNU version returns char*, but we only need it to be const char*. + */ +static const char * +strchrnul(const char *s, int c) +{ + while ((*s != c) && (*s != '\0')) { + ++s; + } + return s; +} +#endif + static void crm_log_filter(struct qb_log_callsite *cs) { @@ -622,11 +641,11 @@ crm_log_filter(struct qb_log_callsite *cs) if (need_init) { need_init = 0; - trace_fns = getenv("PCMK_trace_functions"); - trace_fmts = getenv("PCMK_trace_formats"); - trace_tags = getenv("PCMK_trace_tags"); - trace_files = getenv("PCMK_trace_files"); - trace_blackbox = getenv("PCMK_trace_blackbox"); + trace_fns = pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS); + trace_fmts = pcmk__env_option(PCMK__ENV_TRACE_FORMATS); + trace_tags = pcmk__env_option(PCMK__ENV_TRACE_TAGS); + trace_files = pcmk__env_option(PCMK__ENV_TRACE_FILES); + trace_blackbox = pcmk__env_option(PCMK__ENV_TRACE_BLACKBOX); if (trace_tags != NULL) { uint32_t tag; @@ -695,8 +714,10 @@ crm_update_callsites(void) log = FALSE; crm_debug ("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s", - crm_log_level, getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"), - getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags")); + crm_log_level, pcmk__env_option(PCMK__ENV_TRACE_FILES), + pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS), + pcmk__env_option(PCMK__ENV_TRACE_FORMATS), + pcmk__env_option(PCMK__ENV_TRACE_TAGS)); } qb_log_filter_fn_set(crm_log_filter); } @@ -704,13 +725,11 @@ crm_update_callsites(void) static gboolean crm_tracing_enabled(void) { - if (crm_log_level == LOG_TRACE) { - return TRUE; - } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions") - || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) { - return TRUE; - } - return FALSE; + return (crm_log_level == LOG_TRACE) + || (pcmk__env_option(PCMK__ENV_TRACE_FILES) != NULL) + || (pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS) != NULL) + || (pcmk__env_option(PCMK__ENV_TRACE_FORMATS) != NULL) + || (pcmk__env_option(PCMK__ENV_TRACE_TAGS) != NULL); } static int @@ -784,7 +803,8 @@ set_identity(const char *entity, int argc, char *const *argv) CRM_ASSERT(crm_system_name != NULL); - setenv("PCMK_service", crm_system_name, 1); + // Used by fencing.py.py (in fence-agents) + pcmk__set_env_option(PCMK__ENV_SERVICE, crm_system_name, false); } void @@ -897,7 +917,7 @@ crm_log_init(const char 
*entity, uint8_t level, gboolean daemon, gboolean to_std } else { facility = PCMK__VALUE_NONE; } - pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility); + pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility, true); } if (pcmk__str_eq(facility, PCMK__VALUE_NONE, pcmk__str_casei)) { @@ -1127,16 +1147,21 @@ pcmk__cli_init_logging(const char *name, unsigned int verbosity) /*! * \brief Log XML line-by-line in a formatted fashion * - * \param[in] level Priority at which to log the messages - * \param[in] text Prefix for each line - * \param[in] xml XML to log + * \param[in] file File name to use for log filtering + * \param[in] function Function name to use for log filtering + * \param[in] line Line number to use for log filtering + * \param[in] tags Logging tags to use for log filtering + * \param[in] level Priority at which to log the messages + * \param[in] text Prefix for each line + * \param[in] xml XML to log * * \note This does nothing when \p level is \p LOG_STDOUT. * \note Do not call this function directly. It should be called only from the * \p do_crm_log_xml() macro. */ void -pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml) +pcmk_log_xml_as(const char *file, const char *function, uint32_t line, + uint32_t tags, uint8_t level, const char *text, const xmlNode *xml) { if (xml == NULL) { do_crm_log(level, "%s%sNo data to dump as XML", @@ -1148,12 +1173,76 @@ pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml) } pcmk__output_set_log_level(logger_out, level); + pcmk__output_set_log_filter(logger_out, file, function, line, tags); pcmk__xml_show(logger_out, text, xml, 1, pcmk__xml_fmt_pretty |pcmk__xml_fmt_open |pcmk__xml_fmt_children |pcmk__xml_fmt_close); + pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U); + } +} + +/*! + * \internal + * \brief Log XML changes line-by-line in a formatted fashion + * + * \param[in] file File name to use for log filtering + * \param[in] function Function name to use for log filtering + * \param[in] line Line number to use for log filtering + * \param[in] tags Logging tags to use for log filtering + * \param[in] level Priority at which to log the messages + * \param[in] xml XML whose changes to log + * + * \note This does nothing when \p level is \c LOG_STDOUT. + */ +void +pcmk__log_xml_changes_as(const char *file, const char *function, uint32_t line, + uint32_t tags, uint8_t level, const xmlNode *xml) +{ + if (xml == NULL) { + do_crm_log(level, "No XML to dump"); + return; + } + + if (logger_out == NULL) { + CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return); } + pcmk__output_set_log_level(logger_out, level); + pcmk__output_set_log_filter(logger_out, file, function, line, tags); + pcmk__xml_show_changes(logger_out, xml); + pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U); +} + +/*! + * \internal + * \brief Log an XML patchset line-by-line in a formatted fashion + * + * \param[in] file File name to use for log filtering + * \param[in] function Function name to use for log filtering + * \param[in] line Line number to use for log filtering + * \param[in] tags Logging tags to use for log filtering + * \param[in] level Priority at which to log the messages + * \param[in] patchset XML patchset to log + * + * \note This does nothing when \p level is \c LOG_STDOUT. 
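pcmk_log_xml_as() (and the changes/patchset variants added alongside it) now carry the caller's file, function, line and tags, so qb_log callsite filtering via PCMK_trace_files, PCMK_trace_functions and friends applies to XML dumps as well; the old pcmk_log_xml_impl() survives only as a deprecated shim near the end of this file. Callers normally go through the do_crm_log_xml() macro mentioned in the doc comment; spelled out by hand, the call looks roughly like this (a sketch only, header placement assumed):

    #include <libxml/tree.h>          // xmlNode
    #include <crm/common/logging.h>   // pcmk_log_xml_as(), LOG_TRACE (assumed header)

    // Dump an XML subtree at trace level, tagged with this call site so the
    // PCMK_trace_* filters can match on file, function or line.
    static void
    trace_node_state(const xmlNode *node_state)
    {
        pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0 /* no tags */,
                        LOG_TRACE, "node-state", node_state);
    }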
+ */ +void +pcmk__log_xml_patchset_as(const char *file, const char *function, uint32_t line, + uint32_t tags, uint8_t level, const xmlNode *patchset) +{ + if (patchset == NULL) { + do_crm_log(level, "No patchset to dump"); + return; + } + + if (logger_out == NULL) { + CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return); + } + pcmk__output_set_log_level(logger_out, level); + pcmk__output_set_log_filter(logger_out, file, function, line, tags); + logger_out->message(logger_out, "xml-patchset", patchset); + pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U); } /*! @@ -1188,5 +1277,23 @@ crm_add_logfile(const char *filename) return pcmk__add_logfile(filename) == pcmk_rc_ok; } +void +pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml) +{ + pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, level, text, xml); +} + // LCOV_EXCL_STOP // End deprecated API + +void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context) +{ + pcmk__config_error_handler = error_handler; + pcmk__config_error_context = error_context; +} + +void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context) +{ + pcmk__config_warning_handler = warning_handler; + pcmk__config_warning_context = warning_context; +} \ No newline at end of file diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c index 3124e43..f971713 100644 --- a/lib/common/mainloop.c +++ b/lib/common/mainloop.c @@ -393,16 +393,6 @@ mainloop_add_signal(int sig, void (*dispatch) (int sig)) mainloop_destroy_signal_entry(sig); return FALSE; } -#if 0 - /* If we want signals to interrupt mainloop's poll(), instead of waiting for - * the timeout, then we should call siginterrupt() below - * - * For now, just enforce a low timeout - */ - if (siginterrupt(sig, 1) < 0) { - crm_perror(LOG_INFO, "Could not enable system call interruptions for signal %d", sig); - } -#endif return TRUE; } @@ -624,7 +614,7 @@ struct qb_ipcs_poll_handlers gio_poll_funcs = { static enum qb_ipc_type pick_ipc_type(enum qb_ipc_type requested) { - const char *env = getenv("PCMK_ipc_type"); + const char *env = pcmk__env_option(PCMK__ENV_IPC_TYPE); if (env && strcmp("shared-mem", env) == 0) { return QB_IPC_SHM; @@ -668,7 +658,8 @@ mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type, server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks); if (server == NULL) { - crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc); + crm_err("Could not create %s IPC server: %s (%d)", + name, pcmk_rc_str(errno), errno); return NULL; } @@ -874,21 +865,34 @@ pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, const struct ipc_client_callbacks *callbacks, mainloop_io_t **source) { + int rc = pcmk_rc_ok; + int fd = -1; + const char *ipc_name = NULL; + CRM_CHECK((ipc != NULL) && (callbacks != NULL), return EINVAL); - if (!crm_ipc_connect(ipc)) { - int rc = errno; - crm_debug("Connection to %s failed: %d", crm_ipc_name(ipc), errno); + ipc_name = pcmk__s(crm_ipc_name(ipc), "Pacemaker"); + rc = pcmk__connect_generic_ipc(ipc); + if (rc != pcmk_rc_ok) { + crm_debug("Connection to %s failed: %s", ipc_name, pcmk_rc_str(rc)); return rc; } - *source = mainloop_add_fd(crm_ipc_name(ipc), priority, crm_ipc_get_fd(ipc), - userdata, NULL); - if (*source == NULL) { - int rc = errno; + rc = pcmk__ipc_fd(ipc, &fd); + if (rc != pcmk_rc_ok) { + crm_debug("Could not obtain file descriptor for %s IPC: %s", + ipc_name, pcmk_rc_str(rc)); 
crm_ipc_close(ipc); return rc; } + + *source = mainloop_add_fd(ipc_name, priority, fd, userdata, NULL); + if (*source == NULL) { + rc = errno; + crm_ipc_close(ipc); + return rc; + } + (*source)->ipc = ipc; (*source)->destroy_fn = callbacks->destroy; (*source)->dispatch_fn_ipc = callbacks->dispatch; diff --git a/lib/common/mock.c b/lib/common/mock.c index 2bd8334..6f837ad 100644 --- a/lib/common/mock.c +++ b/lib/common/mock.c @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the Pacemaker project contributors + * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -7,6 +7,8 @@ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ +#include + #include #include #include @@ -262,6 +264,8 @@ __wrap_endgrent(void) { * will_return(__wrap_fopen, errno_to_set); * * expect_* functions: https://api.cmocka.org/group__cmocka__param.html + * + * This has two mocked functions, since fopen() is sometimes actually fopen64(). */ bool pcmk__mock_fopen = false; @@ -285,6 +289,26 @@ __wrap_fopen(const char *pathname, const char *mode) } } +#ifdef HAVE_FOPEN64 +FILE * +__wrap_fopen64(const char *pathname, const char *mode) +{ + if (pcmk__mock_fopen) { + check_expected_ptr(pathname); + check_expected_ptr(mode); + errno = mock_type(int); + + if (errno != 0) { + return NULL; + } else { + return __real_fopen64(pathname, mode); + } + + } else { + return __real_fopen64(pathname, mode); + } +} +#endif /* getpwnam_r() * diff --git a/lib/common/mock_private.h b/lib/common/mock_private.h index 45207c4..b0e0ed2 100644 --- a/lib/common/mock_private.h +++ b/lib/common/mock_private.h @@ -1,5 +1,5 @@ /* - * Copyright 2021-2022 the Pacemaker project contributors + * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -29,6 +29,10 @@ void *__wrap_calloc(size_t nmemb, size_t size); extern bool pcmk__mock_fopen; FILE *__real_fopen(const char *pathname, const char *mode); FILE *__wrap_fopen(const char *pathname, const char *mode); +#ifdef HAVE_FOPEN64 +FILE *__real_fopen64(const char *pathname, const char *mode); +FILE *__wrap_fopen64(const char *pathname, const char *mode); +#endif extern bool pcmk__mock_getenv; char *__real_getenv(const char *name); diff --git a/lib/common/nvpair.c b/lib/common/nvpair.c index 3766c45..dbb9c99 100644 --- a/lib/common/nvpair.c +++ b/lib/common/nvpair.c @@ -333,55 +333,6 @@ crm_xml_add(xmlNode *node, const char *name, const char *value) return (char *)attr->children->content; } -/*! - * \brief Replace an XML attribute with specified name and (possibly NULL) value - * - * \param[in,out] node XML node to modify - * \param[in] name Attribute name to set - * \param[in] value Attribute value to set - * - * \return New value on success, \c NULL otherwise - * \note This does nothing if node or name is \c NULL or empty. - */ -const char * -crm_xml_replace(xmlNode *node, const char *name, const char *value) -{ - bool dirty = FALSE; - xmlAttr *attr = NULL; - const char *old_value = NULL; - - CRM_CHECK(node != NULL, return NULL); - CRM_CHECK(name != NULL && name[0] != 0, return NULL); - - old_value = crm_element_value(node, name); - - /* Could be re-setting the same value */ - CRM_CHECK(old_value != value, return value); - - if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) { - /* Create a fake object linked to doc->_private instead? 
*/ - crm_trace("Cannot replace %s=%s to %s", name, value, node->name); - return NULL; - - } else if (old_value && !value) { - xml_remove_prop(node, name); - return NULL; - } - - if (pcmk__tracking_xml_changes(node, FALSE)) { - if (!old_value || !value || !strcmp(old_value, value)) { - dirty = TRUE; - } - } - - attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value); - if (dirty) { - pcmk__mark_xml_attr_dirty(attr); - } - CRM_CHECK(attr && attr->children && attr->children->content, return NULL); - return (char *) attr->children->content; -} - /*! * \brief Create an XML attribute with specified name and integer value * @@ -503,7 +454,7 @@ crm_element_value(const xmlNode *data, const char *name) return NULL; } else if (name == NULL) { - crm_err("Couldn't find NULL in %s", crm_element_name(data)); + crm_err("Couldn't find NULL in %s", data->name); return NULL; } @@ -883,7 +834,7 @@ xml2list(const xmlNode *parent) nvpair_list = find_xml_node(parent, XML_TAG_ATTRS, FALSE); if (nvpair_list == NULL) { - crm_trace("No attributes in %s", crm_element_name(parent)); + crm_trace("No attributes in %s", parent->name); crm_log_xml_trace(parent, "No attributes for resource op"); } @@ -988,5 +939,44 @@ pcmk_format_named_time(const char *name, time_t epoch_time) return result; } +const char * +crm_xml_replace(xmlNode *node, const char *name, const char *value) +{ + bool dirty = FALSE; + xmlAttr *attr = NULL; + const char *old_value = NULL; + + CRM_CHECK(node != NULL, return NULL); + CRM_CHECK(name != NULL && name[0] != 0, return NULL); + + old_value = crm_element_value(node, name); + + /* Could be re-setting the same value */ + CRM_CHECK(old_value != value, return value); + + if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) { + /* Create a fake object linked to doc->_private instead? */ + crm_trace("Cannot replace %s=%s to %s", name, value, node->name); + return NULL; + + } else if (old_value && !value) { + xml_remove_prop(node, name); + return NULL; + } + + if (pcmk__tracking_xml_changes(node, FALSE)) { + if (!old_value || !value || !strcmp(old_value, value)) { + dirty = TRUE; + } + } + + attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value); + if (dirty) { + pcmk__mark_xml_attr_dirty(attr); + } + CRM_CHECK(attr && attr->children && attr->children->content, return NULL); + return (char *) attr->children->content; +} + // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/common/operations.c b/lib/common/operations.c deleted file mode 100644 index 3db96cd..0000000 --- a/lib/common/operations.c +++ /dev/null @@ -1,530 +0,0 @@ -/* - * Copyright 2004-2023 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#include - -#ifndef _GNU_SOURCE -# define _GNU_SOURCE -#endif - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/*! - * \brief Generate an operation key (RESOURCE_ACTION_INTERVAL) - * - * \param[in] rsc_id ID of resource being operated on - * \param[in] op_type Operation name - * \param[in] interval_ms Operation interval - * - * \return Newly allocated memory containing operation key as string - * - * \note This function asserts on errors, so it will never return NULL. - * The caller is responsible for freeing the result with free(). 
- */ -char * -pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms) -{ - CRM_ASSERT(rsc_id != NULL); - CRM_ASSERT(op_type != NULL); - return crm_strdup_printf(PCMK__OP_FMT, rsc_id, op_type, interval_ms); -} - -static inline gboolean -convert_interval(const char *s, guint *interval_ms) -{ - unsigned long l; - - errno = 0; - l = strtoul(s, NULL, 10); - - if (errno != 0) { - return FALSE; - } - - *interval_ms = (guint) l; - return TRUE; -} - -/*! - * \internal - * \brief Check for underbar-separated substring match - * - * \param[in] key Overall string being checked - * \param[in] position Match before underbar at this \p key index - * \param[in] matches Substrings to match (may contain underbars) - * - * \return \p key index of underbar before any matching substring, - * or 0 if none - */ -static size_t -match_before(const char *key, size_t position, const char **matches) -{ - for (int i = 0; matches[i] != NULL; ++i) { - const size_t match_len = strlen(matches[i]); - - // Must have at least X_MATCH before position - if (position > (match_len + 1)) { - const size_t possible = position - match_len - 1; - - if ((key[possible] == '_') - && (strncmp(key + possible + 1, matches[i], match_len) == 0)) { - return possible; - } - } - } - return 0; -} - -gboolean -parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms) -{ - guint local_interval_ms = 0; - const size_t key_len = (key == NULL)? 0 : strlen(key); - - // Operation keys must be formatted as RSC_ACTION_INTERVAL - size_t action_underbar = 0; // Index in key of underbar before ACTION - size_t interval_underbar = 0; // Index in key of underbar before INTERVAL - size_t possible = 0; - - /* Underbar was a poor choice of separator since both RSC and ACTION can - * contain underbars. Here, list action names and name prefixes that can. 
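Because both resource IDs and action names may contain underbars, parse_op_key() has to check the known underbar-containing action names and notify prefixes before falling back to the last two separators. The behaviour is easiest to see with a few keys; this is a sketch with made-up resource names, and the expected splits simply follow the rules spelled out in the code here:

    #include <stdio.h>
    #include <stdlib.h>
    #include <glib.h>
    #include <crm/common/util.h>   // parse_op_key() (assumed header)

    // Print how one operation key splits into resource, action and interval
    static void
    show_parsed_key(const char *key)
    {
        char *rsc = NULL;
        char *task = NULL;
        guint interval_ms = 0;

        if (parse_op_key(key, &rsc, &task, &interval_ms)) {
            printf("%s -> rsc=%s task=%s interval=%ums\n",
                   key, rsc, task, interval_ms);
        }
        free(rsc);
        free(task);
    }

    /* Expected splits (sketch):
     *   "vip_monitor_10000"     -> "vip",    "monitor",    10000
     *   "vm_migrate_to_0"       -> "vm",     "migrate_to", 0      (special-cased action name)
     *   "my_rsc_monitor_10000"  -> "my_rsc", "monitor",    10000  (underbar in the resource ID)
     */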
- */ - const char *actions_with_underbars[] = { - CRMD_ACTION_MIGRATED, - CRMD_ACTION_MIGRATE, - NULL - }; - const char *action_prefixes_with_underbars[] = { - "pre_" CRMD_ACTION_NOTIFY, - "post_" CRMD_ACTION_NOTIFY, - "confirmed-pre_" CRMD_ACTION_NOTIFY, - "confirmed-post_" CRMD_ACTION_NOTIFY, - NULL, - }; - - // Initialize output variables in case of early return - if (rsc_id) { - *rsc_id = NULL; - } - if (op_type) { - *op_type = NULL; - } - if (interval_ms) { - *interval_ms = 0; - } - - // RSC_ACTION_INTERVAL implies a minimum of 5 characters - if (key_len < 5) { - return FALSE; - } - - // Find, parse, and validate interval - interval_underbar = key_len - 2; - while ((interval_underbar > 2) && (key[interval_underbar] != '_')) { - --interval_underbar; - } - if ((interval_underbar == 2) - || !convert_interval(key + interval_underbar + 1, &local_interval_ms)) { - return FALSE; - } - - // Find the base (OCF) action name, disregarding prefixes - action_underbar = match_before(key, interval_underbar, - actions_with_underbars); - if (action_underbar == 0) { - action_underbar = interval_underbar - 2; - while ((action_underbar > 0) && (key[action_underbar] != '_')) { - --action_underbar; - } - if (action_underbar == 0) { - return FALSE; - } - } - possible = match_before(key, action_underbar, - action_prefixes_with_underbars); - if (possible != 0) { - action_underbar = possible; - } - - // Set output variables - if (rsc_id != NULL) { - *rsc_id = strndup(key, action_underbar); - CRM_ASSERT(*rsc_id != NULL); - } - if (op_type != NULL) { - *op_type = strndup(key + action_underbar + 1, - interval_underbar - action_underbar - 1); - CRM_ASSERT(*op_type != NULL); - } - if (interval_ms != NULL) { - *interval_ms = local_interval_ms; - } - return TRUE; -} - -char * -pcmk__notify_key(const char *rsc_id, const char *notify_type, - const char *op_type) -{ - CRM_CHECK(rsc_id != NULL, return NULL); - CRM_CHECK(op_type != NULL, return NULL); - CRM_CHECK(notify_type != NULL, return NULL); - return crm_strdup_printf("%s_%s_notify_%s_0", - rsc_id, notify_type, op_type); -} - -/*! - * \brief Parse a transition magic string into its constituent parts - * - * \param[in] magic Magic string to parse (must be non-NULL) - * \param[out] uuid If non-NULL, where to store copy of parsed UUID - * \param[out] transition_id If non-NULL, where to store parsed transition ID - * \param[out] action_id If non-NULL, where to store parsed action ID - * \param[out] op_status If non-NULL, where to store parsed result status - * \param[out] op_rc If non-NULL, where to store parsed actual rc - * \param[out] target_rc If non-NULL, where to stored parsed target rc - * - * \return TRUE if key was valid, FALSE otherwise - * \note If uuid is supplied and this returns TRUE, the caller is responsible - * for freeing the memory for *uuid using free(). 
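The transition key packs the action ID, transition ID, target rc and the node UUID into one colon-separated string, in that order, which decode_transition_key() pulls apart again. A roundtrip sketch (the UUID is an arbitrary 36-character example; decode_transition_key() is public while pcmk__transition_key() is internal, and header placement is an assumption):

    #include <stdio.h>
    #include <stdlib.h>
    #include <crm/common/util.h>   // decode_transition_key() (assumed header)

    static void
    transition_key_roundtrip(void)
    {
        const char *node_uuid = "3f2d8a1e-0c5b-4c22-9a77-0123456789ab";  // 36 chars
        // transition_id=12, action_id=3, target_rc=0 -> "3:12:0:<uuid>"
        char *key = pcmk__transition_key(12, 3, 0, node_uuid);
        char *uuid = NULL;
        int transition_id = -1;
        int action_id = -1;
        int target_rc = -1;

        if (decode_transition_key(key, &uuid, &transition_id, &action_id,
                                  &target_rc)) {
            printf("transition=%d action=%d target_rc=%d node=%s\n",
                   transition_id, action_id, target_rc, uuid);
        }
        free(uuid);
        free(key);
    }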
- */ -gboolean -decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, - int *op_status, int *op_rc, int *target_rc) -{ - int res = 0; - char *key = NULL; - gboolean result = TRUE; - int local_op_status = -1; - int local_op_rc = -1; - - CRM_CHECK(magic != NULL, return FALSE); - -#ifdef HAVE_SSCANF_M - res = sscanf(magic, "%d:%d;%ms", &local_op_status, &local_op_rc, &key); -#else - key = calloc(1, strlen(magic) - 3); // magic must have >=4 other characters - CRM_ASSERT(key); - res = sscanf(magic, "%d:%d;%s", &local_op_status, &local_op_rc, key); -#endif - if (res == EOF) { - crm_err("Could not decode transition information '%s': %s", - magic, pcmk_rc_str(errno)); - result = FALSE; - } else if (res < 3) { - crm_warn("Transition information '%s' incomplete (%d of 3 expected items)", - magic, res); - result = FALSE; - } else { - if (op_status) { - *op_status = local_op_status; - } - if (op_rc) { - *op_rc = local_op_rc; - } - result = decode_transition_key(key, uuid, transition_id, action_id, - target_rc); - } - free(key); - return result; -} - -char * -pcmk__transition_key(int transition_id, int action_id, int target_rc, - const char *node) -{ - CRM_CHECK(node != NULL, return NULL); - return crm_strdup_printf("%d:%d:%d:%-*s", - action_id, transition_id, target_rc, 36, node); -} - -/*! - * \brief Parse a transition key into its constituent parts - * - * \param[in] key Transition key to parse (must be non-NULL) - * \param[out] uuid If non-NULL, where to store copy of parsed UUID - * \param[out] transition_id If non-NULL, where to store parsed transition ID - * \param[out] action_id If non-NULL, where to store parsed action ID - * \param[out] target_rc If non-NULL, where to stored parsed target rc - * - * \return TRUE if key was valid, FALSE otherwise - * \note If uuid is supplied and this returns TRUE, the caller is responsible - * for freeing the memory for *uuid using free(). - */ -gboolean -decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, - int *target_rc) -{ - int local_transition_id = -1; - int local_action_id = -1; - int local_target_rc = -1; - char local_uuid[37] = { '\0' }; - - // Initialize any supplied output arguments - if (uuid) { - *uuid = NULL; - } - if (transition_id) { - *transition_id = -1; - } - if (action_id) { - *action_id = -1; - } - if (target_rc) { - *target_rc = -1; - } - - CRM_CHECK(key != NULL, return FALSE); - if (sscanf(key, "%d:%d:%d:%36s", &local_action_id, &local_transition_id, - &local_target_rc, local_uuid) != 4) { - crm_err("Invalid transition key '%s'", key); - return FALSE; - } - if (strlen(local_uuid) != 36) { - crm_warn("Invalid UUID '%s' in transition key '%s'", local_uuid, key); - } - if (uuid) { - *uuid = strdup(local_uuid); - CRM_ASSERT(*uuid); - } - if (transition_id) { - *transition_id = local_transition_id; - } - if (action_id) { - *action_id = local_action_id; - } - if (target_rc) { - *target_rc = local_target_rc; - } - return TRUE; -} - -// Return true if a is an attribute that should be filtered -static bool -should_filter_for_digest(xmlAttrPtr a, void *user_data) -{ - if (strncmp((const char *) a->name, CRM_META "_", - sizeof(CRM_META " ") - 1) == 0) { - return true; - } - return pcmk__str_any_of((const char *) a->name, - XML_ATTR_ID, - XML_ATTR_CRM_VERSION, - XML_LRM_ATTR_OP_DIGEST, - XML_LRM_ATTR_TARGET, - XML_LRM_ATTR_TARGET_UUID, - "pcmk_external_ip", - NULL); -} - -/*! 
- * \internal
- * \brief Remove XML attributes not needed for operation digest
- *
- * \param[in,out] param_set  XML with operation parameters
- */
-void
-pcmk__filter_op_for_digest(xmlNode *param_set)
-{
-    char *key = NULL;
-    char *timeout = NULL;
-    guint interval_ms = 0;
-
-    if (param_set == NULL) {
-        return;
-    }
-
-    /* Timeout is useful for recurring operation digests, so grab it before
-     * removing meta-attributes
-     */
-    key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
-    if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) {
-        interval_ms = 0;
-    }
-    free(key);
-    key = NULL;
-    if (interval_ms != 0) {
-        key = crm_meta_name(XML_ATTR_TIMEOUT);
-        timeout = crm_element_value_copy(param_set, key);
-    }
-
-    // Remove all CRM_meta_* attributes and certain other attributes
-    pcmk__xe_remove_matching_attrs(param_set, should_filter_for_digest, NULL);
-
-    // Add timeout back for recurring operation digests
-    if (timeout != NULL) {
-        crm_xml_add(param_set, key, timeout);
-    }
-    free(timeout);
-    free(key);
-}
-
-int
-rsc_op_expected_rc(const lrmd_event_data_t *op)
-{
-    int rc = 0;
-
-    if (op && op->user_data) {
-        decode_transition_key(op->user_data, NULL, NULL, NULL, &rc);
-    }
-    return rc;
-}
-
-gboolean
-did_rsc_op_fail(lrmd_event_data_t * op, int target_rc)
-{
-    switch (op->op_status) {
-        case PCMK_EXEC_CANCELLED:
-        case PCMK_EXEC_PENDING:
-            return FALSE;
-
-        case PCMK_EXEC_NOT_SUPPORTED:
-        case PCMK_EXEC_TIMEOUT:
-        case PCMK_EXEC_ERROR:
-        case PCMK_EXEC_NOT_CONNECTED:
-        case PCMK_EXEC_NO_FENCE_DEVICE:
-        case PCMK_EXEC_NO_SECRETS:
-        case PCMK_EXEC_INVALID:
-            return TRUE;
-
-        default:
-            if (target_rc != op->rc) {
-                return TRUE;
-            }
-    }
-
-    return FALSE;
-}
-
-/*!
- * \brief Create a CIB XML element for an operation
- *
- * \param[in,out] parent         If not NULL, make new XML node a child of this
- * \param[in]     prefix         Generate an ID using this prefix
- * \param[in]     task           Operation task to set
- * \param[in]     interval_spec  Operation interval to set
- * \param[in]     timeout        If not NULL, operation timeout to set
- *
- * \return New XML object on success, NULL otherwise
- */
-xmlNode *
-crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task,
-                  const char *interval_spec, const char *timeout)
-{
-    xmlNode *xml_op;
-
-    CRM_CHECK(prefix && task && interval_spec, return NULL);
-
-    xml_op = create_xml_node(parent, XML_ATTR_OP);
-    crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval_spec);
-    crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval_spec);
-    crm_xml_add(xml_op, "name", task);
-    if (timeout) {
-        crm_xml_add(xml_op, XML_ATTR_TIMEOUT, timeout);
-    }
-    return xml_op;
-}
-
-/*!
- * \brief Check whether an operation requires resource agent meta-data
- *
- * \param[in] rsc_class  Resource agent class (or NULL to skip class check)
- * \param[in] op         Operation action (or NULL to skip op check)
- *
- * \return true if operation needs meta-data, false otherwise
- * \note At least one of rsc_class and op must be specified.
- */
-bool
-crm_op_needs_metadata(const char *rsc_class, const char *op)
-{
-    /* Agent metadata is used to determine whether an agent reload is possible,
-     * so if this op is not relevant to that feature, we don't need metadata.
-     */
-
-    CRM_CHECK((rsc_class != NULL) || (op != NULL), return false);
-
-    if ((rsc_class != NULL)
-        && !pcmk_is_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) {
-        // Metadata is needed only for resource classes that use parameters
-        return false;
-    }
-    if (op == NULL) {
-        return true;
-    }
-
-    // Metadata is needed only for these actions
-    return pcmk__str_any_of(op, CRMD_ACTION_START, CRMD_ACTION_STATUS,
-                            CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
-                            CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
-                            CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
-                            CRMD_ACTION_NOTIFY, NULL);
-}
-
-/*!
- * \internal
- * \brief Check whether an action name is for a fencing action
- *
- * \param[in] action  Action name to check
- *
- * \return true if \p action is "off", "reboot", or "poweroff", otherwise false
- */
-bool
-pcmk__is_fencing_action(const char *action)
-{
-    return pcmk__str_any_of(action, "off", "reboot", "poweroff", NULL);
-}
-
-bool
-pcmk_is_probe(const char *task, guint interval)
-{
-    if (task == NULL) {
-        return false;
-    }
-
-    return (interval == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_none);
-}
-
-bool
-pcmk_xe_is_probe(const xmlNode *xml_op)
-{
-    const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
-    const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
-    int interval_ms;
-
-    pcmk__scan_min_int(interval_ms_s, &interval_ms, 0);
-    return pcmk_is_probe(task, interval_ms);
-}
-
-bool
-pcmk_xe_mask_probe_failure(const xmlNode *xml_op)
-{
-    int status = PCMK_EXEC_UNKNOWN;
-    int rc = PCMK_OCF_OK;
-
-    if (!pcmk_xe_is_probe(xml_op)) {
-        return false;
-    }
-
-    crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status);
-    crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc);
-
-    return rc == PCMK_OCF_NOT_INSTALLED || rc == PCMK_OCF_INVALID_PARAM ||
-           status == PCMK_EXEC_NOT_INSTALLED;
-}
diff --git a/lib/common/options.c b/lib/common/options.c
index cb32b3f..2d86ebc 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -91,15 +91,23 @@ pcmk__env_option(const char *option)
 /*!
  * \brief Set or unset a Pacemaker environment variable option
  *
- * Set an environment variable option with both a PCMK_ and (for
- * backward compatibility) HA_ prefix.
+ * Set an environment variable option with a \c "PCMK_" prefix and optionally
+ * an \c "HA_" prefix for backward compatibility.
  *
  * \param[in] option  Environment variable name (without prefix)
  * \param[in] value   New value (or NULL to unset)
+ * \param[in] compat  If false and \p value is not \c NULL, set only
+ *                    \c "PCMK_