From b41961d74fe7ff2d4d4abaca92454e87c561e49f Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Mon, 3 Jun 2024 15:39:29 +0200 Subject: Merging upstream version 2.1.8~rc1. Signed-off-by: Daniel Baumann --- COPYING | 3 +- ChangeLog | 464 ++- GNUmakefile | 4 +- INSTALL.md | 17 +- Makefile.am | 2 +- README.markdown | 5 +- SECURITY.md | 18 + agents/ocf/HealthCPU.in | 4 +- agents/ocf/HealthIOWait.in | 26 +- agents/ocf/HealthSMART.in | 46 +- agents/ocf/SysInfo.in | 6 +- agents/ocf/o2cb.in | 3 +- configure.ac | 61 +- cts/README.md | 9 + cts/cli/regression.access_render.exp | 46 +- cts/cli/regression.acls.exp | 246 +- cts/cli/regression.daemons.exp | 736 +++-- cts/cli/regression.rules.exp | 8 +- cts/cli/regression.tools.exp | 2776 ++++++++++++++++-- cts/cli/regression.upgrade.exp | 53 +- cts/cli/regression.validity.exp | 298 +- cts/cli/tickets.xml | 32 + cts/cts-attrd.in | 2 +- cts/cts-cli.in | 300 +- cts/cts-fencing.in | 28 +- cts/cts-log-watcher.in | 3 - cts/scheduler/exp/utilization-order4.exp | 2 +- cts/scheduler/stderr/order-wrong-kind.stderr | 2 +- cts/scheduler/summary/order-wrong-kind.summary | 2 +- cts/valgrind-pcmk.suppressions | 8 - daemons/attrd/attrd_alerts.c | 14 +- daemons/attrd/attrd_attributes.c | 125 +- daemons/attrd/attrd_cib.c | 316 ++- daemons/attrd/attrd_corosync.c | 295 +- daemons/attrd/attrd_elections.c | 10 +- daemons/attrd/attrd_ipc.c | 99 +- daemons/attrd/attrd_messages.c | 57 +- daemons/attrd/attrd_sync.c | 48 +- daemons/attrd/attrd_utils.c | 43 +- daemons/attrd/pacemaker-attrd.c | 10 +- daemons/attrd/pacemaker-attrd.h | 104 +- daemons/based/Makefile.am | 9 +- daemons/based/based_callbacks.c | 361 ++- daemons/based/based_io.c | 35 +- daemons/based/based_messages.c | 286 +- daemons/based/based_notify.c | 64 +- daemons/based/based_operation.c | 1 + daemons/based/based_remote.c | 63 +- daemons/based/based_transaction.c | 34 +- daemons/based/pacemaker-based.c | 61 +- daemons/based/pacemaker-based.h | 6 +- daemons/controld/controld_attrd.c | 28 +- daemons/controld/controld_callbacks.c | 32 +- daemons/controld/controld_cib.c | 184 +- daemons/controld/controld_cib.h | 6 +- daemons/controld/controld_control.c | 287 +- daemons/controld/controld_corosync.c | 40 +- daemons/controld/controld_election.c | 43 +- daemons/controld/controld_execd.c | 217 +- daemons/controld/controld_execd_state.c | 63 +- daemons/controld/controld_fencing.c | 111 +- daemons/controld/controld_fsa.c | 11 +- daemons/controld/controld_fsa.h | 6 +- daemons/controld/controld_join_client.c | 84 +- daemons/controld/controld_join_dc.c | 113 +- daemons/controld/controld_membership.c | 101 +- daemons/controld/controld_messages.c | 270 +- daemons/controld/controld_metadata.c | 51 +- daemons/controld/controld_remote_ra.c | 130 +- daemons/controld/controld_schedulerd.c | 76 +- daemons/controld/controld_te_actions.c | 142 +- daemons/controld/controld_te_callbacks.c | 367 +-- daemons/controld/controld_te_events.c | 106 +- daemons/controld/controld_te_utils.c | 71 +- daemons/controld/controld_throttle.c | 42 +- daemons/controld/controld_timers.c | 73 +- daemons/controld/controld_transition.c | 41 +- daemons/controld/controld_utils.c | 14 +- daemons/controld/pacemaker-controld.c | 28 +- daemons/controld/pacemaker-controld.h | 3 +- daemons/execd/Makefile.am | 4 +- daemons/execd/cts-exec-helper.c | 14 +- daemons/execd/execd_alerts.c | 36 +- daemons/execd/execd_commands.c | 224 +- daemons/execd/pacemaker-execd.c | 53 +- daemons/execd/pacemaker-execd.h | 1 + daemons/execd/remoted_pidone.c | 10 +- 
daemons/execd/remoted_proxy.c | 71 +- daemons/execd/remoted_schemas.c | 286 ++ daemons/execd/remoted_tls.c | 24 +- daemons/fenced/cts-fence-helper.c | 15 +- daemons/fenced/fenced_cib.c | 219 +- daemons/fenced/fenced_commands.c | 579 ++-- daemons/fenced/fenced_history.c | 242 +- daemons/fenced/fenced_remote.c | 459 +-- daemons/fenced/fenced_scheduler.c | 15 +- daemons/fenced/pacemaker-fenced.c | 392 +-- daemons/fenced/pacemaker-fenced.h | 14 +- daemons/pacemakerd/Makefile.am | 3 +- daemons/pacemakerd/pacemakerd.c | 34 +- daemons/pacemakerd/pacemakerd.h | 12 +- daemons/pacemakerd/pcmkd_corosync.c | 20 +- daemons/pacemakerd/pcmkd_corosync.h | 16 + daemons/pacemakerd/pcmkd_messages.c | 55 +- daemons/pacemakerd/pcmkd_subdaemons.c | 116 +- daemons/schedulerd/pacemaker-schedulerd.c | 32 +- daemons/schedulerd/pacemaker-schedulerd.h | 3 +- daemons/schedulerd/schedulerd_messages.c | 61 +- devel/Makefile.am | 4 +- doc/README.md | 32 +- doc/sphinx/Makefile.am | 25 +- doc/sphinx/Pacemaker_Administration/agents.rst | 1247 +++++++-- .../Pacemaker_Administration/configuring.rst | 55 +- doc/sphinx/Pacemaker_Administration/index.rst | 1 + doc/sphinx/Pacemaker_Administration/installing.rst | 6 +- doc/sphinx/Pacemaker_Administration/options.rst | 178 ++ doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst | 3 +- doc/sphinx/Pacemaker_Administration/upgrading.rst | 30 +- doc/sphinx/Pacemaker_Development/c.rst | 125 +- doc/sphinx/Pacemaker_Development/components.rst | 48 +- doc/sphinx/Pacemaker_Development/documentation.rst | 35 + doc/sphinx/Pacemaker_Development/faq.rst | 27 +- doc/sphinx/Pacemaker_Development/general.rst | 10 + doc/sphinx/Pacemaker_Development/glossary.rst | 84 + doc/sphinx/Pacemaker_Development/index.rst | 2 + doc/sphinx/Pacemaker_Explained/alerts.rst | 28 +- doc/sphinx/Pacemaker_Explained/cluster-options.rst | 242 +- doc/sphinx/Pacemaker_Explained/collective.rst | 411 --- doc/sphinx/Pacemaker_Explained/constraints.rst | 241 +- doc/sphinx/Pacemaker_Explained/fencing.rst | 623 ++--- doc/sphinx/Pacemaker_Explained/local-options.rst | 301 +- doc/sphinx/Pacemaker_Explained/nodes.rst | 38 + doc/sphinx/Pacemaker_Explained/operations.rst | 257 +- doc/sphinx/Pacemaker_Explained/resources.rst | 470 ++-- .../Pacemaker_Explained/reusing-configuration.rst | 12 +- doc/sphinx/Pacemaker_Explained/rules.rst | 1293 +++++---- doc/sphinx/Pacemaker_Explained/utilization.rst | 252 +- doc/sphinx/Pacemaker_Remote/options.rst | 4 + doc/sphinx/conf.py.in | 6 +- etc/sysconfig/pacemaker.in | 12 +- include/Makefile.am | 6 +- include/crm/Makefile.am | 4 +- include/crm/cib.h | 16 +- include/crm/cib/cib_types.h | 58 +- include/crm/cib/internal.h | 43 +- include/crm/cib/util.h | 12 +- include/crm/cib/util_compat.h | 22 +- include/crm/cib_compat.h | 34 + include/crm/cluster.h | 203 +- include/crm/cluster/compat.h | 159 +- include/crm/cluster/internal.h | 104 +- include/crm/common/Makefile.am | 12 +- include/crm/common/acl.h | 8 +- include/crm/common/acl_internal.h | 7 +- include/crm/common/action_relation_internal.h | 51 +- include/crm/common/actions.h | 237 +- include/crm/common/actions_internal.h | 71 +- include/crm/common/agents.h | 4 +- include/crm/common/agents_compat.h | 4 +- include/crm/common/alerts_internal.h | 6 +- include/crm/common/attrd_internal.h | 50 - include/crm/common/attrs_internal.h | 57 + include/crm/common/bundles_internal.h | 89 + include/crm/common/cib.h | 4 +- include/crm/common/cib_internal.h | 4 +- include/crm/common/clone_internal.h | 52 +- include/crm/common/digests_internal.h | 15 +- 
include/crm/common/failcounts_internal.h | 4 +- include/crm/common/group_internal.h | 24 +- include/crm/common/history_internal.h | 52 + include/crm/common/internal.h | 72 +- include/crm/common/io_internal.h | 4 +- include/crm/common/ipc.h | 28 +- include/crm/common/ipc_attrd_internal.h | 11 +- include/crm/common/ipc_controld.h | 4 +- include/crm/common/ipc_internal.h | 18 +- include/crm/common/ipc_pacemakerd.h | 18 +- include/crm/common/ipc_schedulerd.h | 4 +- include/crm/common/iso8601.h | 30 +- include/crm/common/iso8601_internal.h | 4 +- include/crm/common/location_internal.h | 36 + include/crm/common/logging.h | 125 +- include/crm/common/logging_compat.h | 6 +- include/crm/common/logging_internal.h | 96 +- include/crm/common/mainloop.h | 30 +- include/crm/common/mainloop_compat.h | 8 +- include/crm/common/messages_internal.h | 3 +- include/crm/common/nodes.h | 198 +- include/crm/common/nodes_internal.h | 27 + include/crm/common/nvpair.h | 23 +- include/crm/common/nvpair_internal.h | 63 + include/crm/common/options.h | 231 ++ include/crm/common/options_internal.h | 204 +- include/crm/common/output.h | 4 +- include/crm/common/output_internal.h | 107 +- include/crm/common/primitive_internal.h | 39 + include/crm/common/remote_internal.h | 65 +- include/crm/common/resources.h | 268 +- include/crm/common/resources_internal.h | 25 + include/crm/common/results.h | 76 +- include/crm/common/results_compat.h | 4 +- include/crm/common/results_internal.h | 23 +- include/crm/common/roles.h | 14 +- include/crm/common/roles_internal.h | 48 +- include/crm/common/rules.h | 112 + include/crm/common/rules_internal.h | 38 + include/crm/common/scheduler.h | 207 +- include/crm/common/scheduler_internal.h | 118 +- include/crm/common/scheduler_types.h | 4 +- include/crm/common/schemas.h | 32 + include/crm/common/schemas_internal.h | 37 + include/crm/common/scores.h | 33 + include/crm/common/scores_compat.h | 37 + include/crm/common/scores_internal.h | 15 + include/crm/common/strings_internal.h | 22 +- include/crm/common/tags.h | 14 +- include/crm/common/tickets.h | 20 +- include/crm/common/unittest_internal.h | 85 +- include/crm/common/util.h | 56 +- include/crm/common/util_compat.h | 29 +- include/crm/common/xml.h | 224 +- include/crm/common/xml_compat.h | 127 +- include/crm/common/xml_internal.h | 234 +- include/crm/common/xml_io.h | 48 + include/crm/common/xml_io_compat.h | 58 + include/crm/common/xml_io_internal.h | 34 + include/crm/common/xml_names.h | 458 +++ include/crm/common/xml_names_internal.h | 353 +++ include/crm/compatibility.h | 35 +- include/crm/crm.h | 25 +- include/crm/crm_compat.h | 34 +- include/crm/fencing/internal.h | 66 +- include/crm/lrmd.h | 84 +- include/crm/lrmd_compat.h | 177 ++ include/crm/msg_xml.h | 469 +--- include/crm/msg_xml_compat.h | 924 +++++- include/crm/pengine/Makefile.am | 5 +- include/crm/pengine/common.h | 38 +- include/crm/pengine/common_compat.h | 57 +- include/crm/pengine/complex.h | 2 + include/crm/pengine/internal.h | 294 +- include/crm/pengine/pe_types_compat.h | 116 +- include/crm/pengine/remote_internal.h | 7 +- include/crm/pengine/rules.h | 42 +- include/crm/pengine/rules_compat.h | 50 +- include/crm/pengine/rules_internal.h | 16 +- include/crm/pengine/status.h | 73 +- include/crm/pengine/status_compat.h | 72 + include/crm/services.h | 7 +- include/crm/stonith-ng.h | 49 +- include/crm_config.h.in | 5 +- include/crm_internal.h | 48 +- include/pacemaker-internal.h | 5 +- include/pacemaker.h | 174 +- include/pcmki/pcmki_options.h | 19 + 
include/pcmki/pcmki_resource.h | 4 + include/pcmki/pcmki_ticket.h | 154 + include/pcmki/pcmki_transition.h | 3 +- include/pcmki/pcmki_verify.h | 49 + lib/Makefile.am | 2 +- lib/cib/Makefile.am | 2 +- lib/cib/cib_attrs.c | 255 +- lib/cib/cib_client.c | 35 +- lib/cib/cib_file.c | 90 +- lib/cib/cib_native.c | 45 +- lib/cib/cib_ops.c | 350 +-- lib/cib/cib_remote.c | 50 +- lib/cib/cib_utils.c | 383 ++- lib/cluster/Makefile.am | 6 +- lib/cluster/cluster.c | 528 ++-- lib/cluster/corosync.c | 175 +- lib/cluster/cpg.c | 472 ++-- lib/cluster/crmcluster_private.h | 26 +- lib/cluster/election.c | 67 +- lib/cluster/membership.c | 790 ++++-- lib/cluster/tests/Makefile.am | 12 + lib/cluster/tests/cluster/Makefile.am | 18 + .../cluster/pcmk_cluster_set_destroy_fn_test.c | 79 + lib/cluster/tests/cpg/Makefile.am | 19 + .../tests/cpg/pcmk_cpg_set_confchg_fn_test.c | 98 + .../tests/cpg/pcmk_cpg_set_deliver_fn_test.c | 94 + lib/common/Makefile.am | 22 +- lib/common/acl.c | 136 +- lib/common/actions.c | 281 +- lib/common/agents.c | 3 +- lib/common/alerts.c | 20 +- lib/common/attrs.c | 116 +- lib/common/cib.c | 100 +- lib/common/crmcommon_private.h | 160 +- lib/common/digest.c | 90 +- lib/common/health.c | 39 +- lib/common/io.c | 15 +- lib/common/ipc_attrd.c | 258 +- lib/common/ipc_client.c | 39 +- lib/common/ipc_common.c | 4 +- lib/common/ipc_controld.c | 113 +- lib/common/ipc_pacemakerd.c | 69 +- lib/common/ipc_schedulerd.c | 30 +- lib/common/ipc_server.c | 88 +- lib/common/iso8601.c | 233 +- lib/common/logging.c | 20 +- lib/common/mainloop.c | 33 +- lib/common/messages.c | 122 +- lib/common/mock.c | 85 +- lib/common/mock_private.h | 13 +- lib/common/nodes.c | 145 +- lib/common/nvpair.c | 161 +- lib/common/options.c | 1489 ++++++++-- lib/common/options_display.c | 493 ++++ lib/common/output.c | 41 +- lib/common/output_html.c | 120 +- lib/common/output_log.c | 80 +- lib/common/output_none.c | 8 +- lib/common/output_text.c | 108 +- lib/common/output_xml.c | 320 ++- lib/common/patchset.c | 821 +++--- lib/common/patchset_display.c | 73 +- lib/common/probes.c | 84 + lib/common/remote.c | 35 +- lib/common/resources.c | 67 + lib/common/results.c | 8 +- lib/common/roles.c | 88 + lib/common/rules.c | 1512 ++++++++++ lib/common/scheduler.c | 97 +- lib/common/schemas.c | 1561 +++++++---- lib/common/scores.c | 57 +- lib/common/strings.c | 259 +- lib/common/tests/Makefile.am | 8 +- lib/common/tests/acl/xml_acl_denied_test.c | 10 +- lib/common/tests/acl/xml_acl_enabled_test.c | 10 +- lib/common/tests/actions/Makefile.am | 10 +- lib/common/tests/actions/copy_in_properties_test.c | 62 - lib/common/tests/actions/expand_plus_plus_test.c | 256 -- .../tests/actions/fix_plus_plus_recursive_test.c | 47 - lib/common/tests/actions/pcmk_is_probe_test.c | 25 - lib/common/tests/actions/pcmk_xe_is_probe_test.c | 43 - .../actions/pcmk_xe_mask_probe_failure_test.c | 150 - .../health/pcmk__parse_health_strategy_test.c | 2 +- .../health/pcmk__validate_health_strategy_test.c | 2 +- lib/common/tests/io/pcmk__full_path_test.c | 10 +- lib/common/tests/iso8601/Makefile.am | 6 +- .../tests/iso8601/pcmk__add_time_from_xml_test.c | 243 ++ .../tests/iso8601/pcmk__readable_interval_test.c | 4 +- .../tests/iso8601/pcmk__set_time_if_earlier_test.c | 80 + lib/common/tests/nodes/Makefile.am | 23 + .../tests/nodes/pcmk__find_node_in_list_test.c | 53 + lib/common/tests/nodes/pcmk__xe_add_node_test.c | 71 + .../nodes/pcmk_foreach_active_resource_test.c | 149 + lib/common/tests/nodes/pcmk_node_is_clean_test.c | 54 + 
.../tests/nodes/pcmk_node_is_in_maintenance_test.c | 54 + lib/common/tests/nodes/pcmk_node_is_online_test.c | 54 + lib/common/tests/nodes/pcmk_node_is_pending_test.c | 54 + .../tests/nodes/pcmk_node_is_shutting_down_test.c | 54 + lib/common/tests/nvpair/Makefile.am | 7 +- lib/common/tests/nvpair/crm_meta_name_test.c | 41 + lib/common/tests/nvpair/crm_meta_value_test.c | 58 + .../tests/nvpair/pcmk__xe_attr_is_true_test.c | 10 +- .../tests/nvpair/pcmk__xe_get_bool_attr_test.c | 11 +- .../tests/nvpair/pcmk__xe_get_datetime_test.c | 108 + .../tests/nvpair/pcmk__xe_set_bool_attr_test.c | 12 +- lib/common/tests/probes/Makefile.am | 18 + lib/common/tests/probes/pcmk_is_probe_test.c | 25 + lib/common/tests/probes/pcmk_xe_is_probe_test.c | 54 + .../tests/probes/pcmk_xe_mask_probe_failure_test.c | 333 +++ .../tests/procfs/pcmk__procfs_pid2path_test.c | 8 +- lib/common/tests/resources/Makefile.am | 17 + lib/common/tests/resources/pcmk_resource_id_test.c | 36 + .../resources/pcmk_resource_is_managed_test.c | 46 + lib/common/tests/rules/Makefile.am | 29 + lib/common/tests/rules/pcmk__cmp_by_type_test.c | 102 + .../rules/pcmk__evaluate_attr_expression_test.c | 831 ++++++ .../tests/rules/pcmk__evaluate_condition_test.c | 197 ++ .../rules/pcmk__evaluate_date_expression_test.c | 684 +++++ .../tests/rules/pcmk__evaluate_date_spec_test.c | 231 ++ .../rules/pcmk__evaluate_op_expression_test.c | 207 ++ .../rules/pcmk__evaluate_rsc_expression_test.c | 227 ++ lib/common/tests/rules/pcmk__parse_combine_test.c | 52 + .../tests/rules/pcmk__parse_comparison_test.c | 72 + lib/common/tests/rules/pcmk__parse_source_test.c | 62 + lib/common/tests/rules/pcmk__parse_type_test.c | 127 + .../tests/rules/pcmk__replace_submatches_test.c | 81 + .../tests/rules/pcmk__unpack_duration_test.c | 120 + lib/common/tests/rules/pcmk_evaluate_rule_test.c | 379 +++ lib/common/tests/scheduler/Makefile.am | 19 + lib/common/tests/scheduler/pcmk_get_dc_test.c | 47 + .../scheduler/pcmk_get_no_quorum_policy_test.c | 34 + lib/common/tests/scheduler/pcmk_has_quorum_test.c | 36 + .../tests/scheduler/pcmk_set_scheduler_cib_test.c | 71 + lib/common/tests/schemas/Makefile.am | 88 + lib/common/tests/schemas/crm_schema_init_test.c | 152 + .../schemas/pcmk__build_schema_xml_node_test.c | 158 ++ .../tests/schemas/pcmk__cmp_schemas_by_name_test.c | 121 + .../tests/schemas/pcmk__find_x_0_schema_test.c | 100 + lib/common/tests/schemas/pcmk__get_schema_test.c | 81 + .../schemas/pcmk__schema_files_later_than_test.c | 106 + lib/common/tests/scores/char2score_test.c | 14 +- lib/common/tests/scores/pcmk__add_scores_test.c | 69 +- lib/common/tests/scores/pcmk_readable_score_test.c | 10 +- lib/common/tests/strings/Makefile.am | 3 +- lib/common/tests/strings/crm_get_msec_test.c | 30 +- lib/common/tests/strings/crm_str_to_boolean_test.c | 16 +- .../tests/strings/pcmk__char_in_any_str_test.c | 46 - lib/common/tests/strings/pcmk__compress_test.c | 11 +- lib/common/tests/strings/pcmk__str_update_test.c | 3 +- lib/common/tests/utils/Makefile.am | 11 +- lib/common/tests/utils/compare_version_test.c | 5 +- lib/common/tests/utils/crm_meta_name_test.c | 41 - lib/common/tests/utils/crm_meta_value_test.c | 56 - lib/common/tests/utils/pcmk__realloc_test.c | 69 + lib/common/tests/utils/pcmk_hostname_test.c | 56 - lib/common/tests/xml/Makefile.am | 11 +- lib/common/tests/xml/crm_xml_init_test.c | 230 ++ lib/common/tests/xml/pcmk__xe_copy_attrs_test.c | 188 ++ lib/common/tests/xml/pcmk__xe_first_child_test.c | 106 + lib/common/tests/xml/pcmk__xe_foreach_child_test.c | 20 +- 
lib/common/tests/xml/pcmk__xe_match_test.c | 106 - lib/common/tests/xml/pcmk__xe_set_score_test.c | 188 ++ lib/common/tests/xml/pcmk__xml_escape_test.c | 213 ++ lib/common/tests/xml/pcmk__xml_needs_escape_test.c | 337 +++ lib/common/tests/xpath/pcmk__xpath_node_id_test.c | 29 +- lib/common/unittest.c | 128 + lib/common/utils.c | 145 +- lib/common/watchdog.c | 45 +- lib/common/xml.c | 2949 ++++++++++---------- lib/common/xml_attr.c | 31 +- lib/common/xml_display.c | 21 +- lib/common/xml_io.c | 840 ++++++ lib/common/xpath.c | 85 +- lib/fencing/Makefile.am | 2 +- lib/fencing/st_actions.c | 46 +- lib/fencing/st_client.c | 277 +- lib/fencing/st_lha.c | 115 +- lib/fencing/st_output.c | 66 +- lib/fencing/st_rhcs.c | 65 +- lib/lrmd/Makefile.am | 2 +- lib/lrmd/lrmd_alerts.c | 4 +- lib/lrmd/lrmd_client.c | 310 +- lib/lrmd/lrmd_output.c | 39 +- lib/lrmd/proxy_common.c | 86 +- lib/pacemaker/Makefile.am | 10 +- lib/pacemaker/libpacemaker_private.h | 77 +- lib/pacemaker/pcmk_acl.c | 43 +- lib/pacemaker/pcmk_agents.c | 8 +- lib/pacemaker/pcmk_cluster_queries.c | 29 +- lib/pacemaker/pcmk_fence.c | 22 +- lib/pacemaker/pcmk_graph_consumer.c | 270 +- lib/pacemaker/pcmk_graph_logging.c | 20 +- lib/pacemaker/pcmk_graph_producer.c | 191 +- lib/pacemaker/pcmk_injections.c | 282 +- lib/pacemaker/pcmk_options.c | 153 + lib/pacemaker/pcmk_output.c | 679 +++-- lib/pacemaker/pcmk_resource.c | 113 +- lib/pacemaker/pcmk_result_code.c | 4 +- lib/pacemaker/pcmk_rule.c | 134 +- lib/pacemaker/pcmk_sched_actions.c | 458 +-- lib/pacemaker/pcmk_sched_bundle.c | 171 +- lib/pacemaker/pcmk_sched_clone.c | 111 +- lib/pacemaker/pcmk_sched_colocation.c | 508 ++-- lib/pacemaker/pcmk_sched_constraints.c | 120 +- lib/pacemaker/pcmk_sched_fencing.c | 61 +- lib/pacemaker/pcmk_sched_group.c | 147 +- lib/pacemaker/pcmk_sched_instances.c | 324 +-- lib/pacemaker/pcmk_sched_location.c | 542 ++-- lib/pacemaker/pcmk_sched_migration.c | 181 +- lib/pacemaker/pcmk_sched_nodes.c | 63 +- lib/pacemaker/pcmk_sched_ordering.c | 485 ++-- lib/pacemaker/pcmk_sched_primitive.c | 457 +-- lib/pacemaker/pcmk_sched_probes.c | 119 +- lib/pacemaker/pcmk_sched_promotable.c | 328 +-- lib/pacemaker/pcmk_sched_recurring.c | 205 +- lib/pacemaker/pcmk_sched_remote.c | 78 +- lib/pacemaker/pcmk_sched_resource.c | 89 +- lib/pacemaker/pcmk_sched_tickets.c | 156 +- lib/pacemaker/pcmk_sched_utilization.c | 34 +- lib/pacemaker/pcmk_scheduler.c | 141 +- lib/pacemaker/pcmk_setup.c | 78 + lib/pacemaker/pcmk_simulate.c | 121 +- lib/pacemaker/pcmk_status.c | 18 +- lib/pacemaker/pcmk_ticket.c | 553 ++++ lib/pacemaker/pcmk_verify.c | 153 + lib/pacemaker/tests/Makefile.am | 11 + lib/pacemaker/tests/pcmk_resource/Makefile.am | 20 + .../pcmk_resource/pcmk_resource_delete_test.c | 156 ++ lib/pacemaker/tests/pcmk_ticket/Makefile.am | 27 + .../pcmk_ticket/pcmk__get_ticket_state_test.c | 178 ++ .../pcmk_ticket/pcmk_ticket_constraints_test.c | 130 + .../tests/pcmk_ticket/pcmk_ticket_delete_test.c | 170 ++ .../tests/pcmk_ticket/pcmk_ticket_get_attr_test.c | 150 + .../tests/pcmk_ticket/pcmk_ticket_info_test.c | 138 + .../pcmk_ticket/pcmk_ticket_remove_attr_test.c | 231 ++ .../tests/pcmk_ticket/pcmk_ticket_set_attr_test.c | 281 ++ .../tests/pcmk_ticket/pcmk_ticket_state_test.c | 156 ++ lib/pengine/Makefile.am | 23 +- lib/pengine/bundle.c | 445 +-- lib/pengine/clone.c | 250 +- lib/pengine/common.c | 603 +--- lib/pengine/complex.c | 532 ++-- lib/pengine/failcounts.c | 93 +- lib/pengine/group.c | 80 +- lib/pengine/native.c | 404 +-- lib/pengine/pe_actions.c | 607 ++-- lib/pengine/pe_digest.c | 
138 +- lib/pengine/pe_health.c | 27 +- lib/pengine/pe_notif.c | 122 +- lib/pengine/pe_output.c | 1317 +++++---- lib/pengine/pe_status_private.h | 2 +- lib/pengine/remote.c | 104 +- lib/pengine/rules.c | 1232 ++------ lib/pengine/rules_alerts.c | 87 +- lib/pengine/status.c | 112 +- lib/pengine/tests/Makefile.am | 6 +- lib/pengine/tests/native/native_find_rsc_test.c | 14 +- lib/pengine/tests/native/pe_base_name_eq_test.c | 6 +- lib/pengine/tests/rules/Makefile.am | 18 - .../tests/rules/pe_cron_range_satisfied_test.c | 165 -- lib/pengine/tests/status/Makefile.am | 3 +- lib/pengine/tests/status/pe_find_node_any_test.c | 10 +- lib/pengine/tests/status/pe_find_node_id_test.c | 10 +- lib/pengine/tests/status/pe_find_node_test.c | 51 - .../tests/status/set_working_set_defaults_test.c | 7 +- lib/pengine/unpack.c | 1545 +++++----- lib/pengine/utils.c | 90 +- lib/services/Makefile.am | 2 +- lib/services/services.c | 12 +- lib/services/services_linux.c | 50 +- lib/services/services_lsb.c | 191 +- lib/services/services_nagios.c | 8 +- lib/services/systemd.c | 65 +- lib/services/upstart.c | 55 +- m4/version.m4 | 2 +- mk/tap.mk | 17 +- mk/unittest.mk | 15 +- po/zh_CN.po | 1510 ++++++---- python/Makefile.am | 12 +- python/pacemaker/__init__.py | 6 +- python/pacemaker/_cts/CTS.py | 137 +- python/pacemaker/_cts/__init__.py | 6 +- python/pacemaker/_cts/audits.py | 350 ++- python/pacemaker/_cts/cib.py | 65 +- python/pacemaker/_cts/cibxml.py | 547 ++-- python/pacemaker/_cts/clustermanager.py | 212 +- python/pacemaker/_cts/cmcorosync.py | 17 +- python/pacemaker/_cts/corosync.py | 52 +- python/pacemaker/_cts/environment.py | 128 +- python/pacemaker/_cts/errors.py | 22 +- python/pacemaker/_cts/input.py | 8 +- python/pacemaker/_cts/logging.py | 45 +- python/pacemaker/_cts/network.py | 34 +- python/pacemaker/_cts/patterns.py | 188 +- python/pacemaker/_cts/process.py | 18 +- python/pacemaker/_cts/remote.py | 143 +- python/pacemaker/_cts/scenarios.py | 154 +- python/pacemaker/_cts/test.py | 295 +- python/pacemaker/_cts/tests/__init__.py | 14 +- python/pacemaker/_cts/tests/componentfail.py | 23 +- python/pacemaker/_cts/tests/ctstest.py | 112 +- python/pacemaker/_cts/tests/fliptest.py | 18 +- python/pacemaker/_cts/tests/maintenancemode.py | 38 +- python/pacemaker/_cts/tests/nearquorumpointtest.py | 30 +- python/pacemaker/_cts/tests/partialstart.py | 23 +- python/pacemaker/_cts/tests/reattach.py | 51 +- python/pacemaker/_cts/tests/remotebasic.py | 18 +- python/pacemaker/_cts/tests/remotedriver.py | 152 +- python/pacemaker/_cts/tests/remotemigrate.py | 21 +- python/pacemaker/_cts/tests/remoterscfailure.py | 26 +- python/pacemaker/_cts/tests/remotestonithd.py | 26 +- python/pacemaker/_cts/tests/resourcerecover.py | 35 +- python/pacemaker/_cts/tests/restartonebyone.py | 18 +- python/pacemaker/_cts/tests/restarttest.py | 18 +- python/pacemaker/_cts/tests/resynccib.py | 23 +- python/pacemaker/_cts/tests/simulstart.py | 18 +- python/pacemaker/_cts/tests/simulstartlite.py | 31 +- python/pacemaker/_cts/tests/simulstop.py | 18 +- python/pacemaker/_cts/tests/simulstoplite.py | 33 +- python/pacemaker/_cts/tests/splitbraintest.py | 36 +- python/pacemaker/_cts/tests/standbytest.py | 22 +- python/pacemaker/_cts/tests/startonebyone.py | 18 +- python/pacemaker/_cts/tests/starttest.py | 24 +- python/pacemaker/_cts/tests/stonithdtest.py | 24 +- python/pacemaker/_cts/tests/stoponebyone.py | 18 +- python/pacemaker/_cts/tests/stoptest.py | 26 +- python/pacemaker/_cts/timer.py | 44 +- python/pacemaker/_cts/watcher.py | 489 ++-- 
python/pacemaker/buildoptions.py.in | 49 +- python/pacemaker/exitstatus.py | 99 +- python/tests/test_cts_network.py | 1 + python/tests/test_exitstatus.py | 1 + rpm/Makefile.am | 11 +- rpm/pacemaker.spec.in | 11 +- rpm/rpmlintrc | 9 - tools/attrd_updater.c | 12 +- tools/cibadmin.c | 209 +- tools/cibsecret.in | 2 +- tools/crm_attribute.c | 316 ++- tools/crm_diff.c | 50 +- tools/crm_error.c | 4 +- tools/crm_mon.c | 324 ++- tools/crm_mon.h | 3 +- tools/crm_mon_curses.c | 28 +- tools/crm_node.c | 87 +- tools/crm_resource.c | 1101 ++++---- tools/crm_resource.h | 26 +- tools/crm_resource_ban.c | 188 +- tools/crm_resource_print.c | 271 +- tools/crm_resource_runtime.c | 779 +++--- tools/crm_rule.c | 15 +- tools/crm_shadow.c | 94 +- tools/crm_simulate.c | 52 +- tools/crm_ticket.c | 614 +--- tools/crm_verify.c | 174 +- tools/crmadmin.c | 14 +- tools/stonith_admin.c | 23 +- xml/Makefile.am | 63 +- xml/README.md | 2 + xml/alerts-3.10.rng | 82 + xml/api/crm_attribute-2.34.rng | 33 + xml/api/crm_attribute-2.36.rng | 33 + xml/api/crm_mon-2.35.rng | 197 ++ xml/api/crm_resource-2.36.rng | 289 ++ xml/api/crm_resource-2.37.rng | 348 +++ xml/api/crm_ticket-2.35.rng | 34 + xml/api/ocf-ra-1.1.rng | 221 ++ xml/api/options-2.34.rng | 37 + xml/api/options-2.36.rng | 46 + xml/api/stonith_admin-2.33.rng | 55 + xml/api/ticket-2.35.rng | 68 + xml/best-match.sh | 98 - xml/constraints-3.10.rng | 287 ++ xml/constraints-next.rng | 4 +- xml/nodes-3.10.rng | 46 + xml/nvset-3.10.rng | 68 + xml/options-3.10.rng | 133 + xml/resources-3.10.rng | 451 +++ xml/rng-helper.in | 251 ++ xml/rule-3.10.rng | 433 +++ xml/version-diff.sh.in | 60 - 636 files changed, 56925 insertions(+), 29378 deletions(-) create mode 100644 SECURITY.md create mode 100644 cts/cli/tickets.xml create mode 100644 daemons/execd/remoted_schemas.c create mode 100644 daemons/pacemakerd/pcmkd_corosync.h create mode 100644 doc/sphinx/Pacemaker_Administration/options.rst create mode 100644 doc/sphinx/Pacemaker_Development/documentation.rst create mode 100644 doc/sphinx/Pacemaker_Development/glossary.rst create mode 100644 include/crm/cib_compat.h delete mode 100644 include/crm/common/attrd_internal.h create mode 100644 include/crm/common/attrs_internal.h create mode 100644 include/crm/common/bundles_internal.h create mode 100644 include/crm/common/history_internal.h create mode 100644 include/crm/common/location_internal.h create mode 100644 include/crm/common/nodes_internal.h create mode 100644 include/crm/common/nvpair_internal.h create mode 100644 include/crm/common/options.h create mode 100644 include/crm/common/primitive_internal.h create mode 100644 include/crm/common/resources_internal.h create mode 100644 include/crm/common/rules.h create mode 100644 include/crm/common/rules_internal.h create mode 100644 include/crm/common/schemas.h create mode 100644 include/crm/common/schemas_internal.h create mode 100644 include/crm/common/scores.h create mode 100644 include/crm/common/scores_compat.h create mode 100644 include/crm/common/scores_internal.h create mode 100644 include/crm/common/xml_io.h create mode 100644 include/crm/common/xml_io_compat.h create mode 100644 include/crm/common/xml_io_internal.h create mode 100644 include/crm/common/xml_names.h create mode 100644 include/crm/common/xml_names_internal.h create mode 100644 include/crm/lrmd_compat.h create mode 100644 include/crm/pengine/status_compat.h create mode 100644 include/pcmki/pcmki_options.h create mode 100644 include/pcmki/pcmki_ticket.h create mode 100644 include/pcmki/pcmki_verify.h create mode 
100644 lib/cluster/tests/Makefile.am create mode 100644 lib/cluster/tests/cluster/Makefile.am create mode 100644 lib/cluster/tests/cluster/pcmk_cluster_set_destroy_fn_test.c create mode 100644 lib/cluster/tests/cpg/Makefile.am create mode 100644 lib/cluster/tests/cpg/pcmk_cpg_set_confchg_fn_test.c create mode 100644 lib/cluster/tests/cpg/pcmk_cpg_set_deliver_fn_test.c create mode 100644 lib/common/options_display.c create mode 100644 lib/common/probes.c create mode 100644 lib/common/resources.c create mode 100644 lib/common/roles.c create mode 100644 lib/common/rules.c delete mode 100644 lib/common/tests/actions/copy_in_properties_test.c delete mode 100644 lib/common/tests/actions/expand_plus_plus_test.c delete mode 100644 lib/common/tests/actions/fix_plus_plus_recursive_test.c delete mode 100644 lib/common/tests/actions/pcmk_is_probe_test.c delete mode 100644 lib/common/tests/actions/pcmk_xe_is_probe_test.c delete mode 100644 lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c create mode 100644 lib/common/tests/iso8601/pcmk__add_time_from_xml_test.c create mode 100644 lib/common/tests/iso8601/pcmk__set_time_if_earlier_test.c create mode 100644 lib/common/tests/nodes/Makefile.am create mode 100644 lib/common/tests/nodes/pcmk__find_node_in_list_test.c create mode 100644 lib/common/tests/nodes/pcmk__xe_add_node_test.c create mode 100644 lib/common/tests/nodes/pcmk_foreach_active_resource_test.c create mode 100644 lib/common/tests/nodes/pcmk_node_is_clean_test.c create mode 100644 lib/common/tests/nodes/pcmk_node_is_in_maintenance_test.c create mode 100644 lib/common/tests/nodes/pcmk_node_is_online_test.c create mode 100644 lib/common/tests/nodes/pcmk_node_is_pending_test.c create mode 100644 lib/common/tests/nodes/pcmk_node_is_shutting_down_test.c create mode 100644 lib/common/tests/nvpair/crm_meta_name_test.c create mode 100644 lib/common/tests/nvpair/crm_meta_value_test.c create mode 100644 lib/common/tests/nvpair/pcmk__xe_get_datetime_test.c create mode 100644 lib/common/tests/probes/Makefile.am create mode 100644 lib/common/tests/probes/pcmk_is_probe_test.c create mode 100644 lib/common/tests/probes/pcmk_xe_is_probe_test.c create mode 100644 lib/common/tests/probes/pcmk_xe_mask_probe_failure_test.c create mode 100644 lib/common/tests/resources/Makefile.am create mode 100644 lib/common/tests/resources/pcmk_resource_id_test.c create mode 100644 lib/common/tests/resources/pcmk_resource_is_managed_test.c create mode 100644 lib/common/tests/rules/Makefile.am create mode 100644 lib/common/tests/rules/pcmk__cmp_by_type_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_attr_expression_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_condition_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_date_expression_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_date_spec_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_op_expression_test.c create mode 100644 lib/common/tests/rules/pcmk__evaluate_rsc_expression_test.c create mode 100644 lib/common/tests/rules/pcmk__parse_combine_test.c create mode 100644 lib/common/tests/rules/pcmk__parse_comparison_test.c create mode 100644 lib/common/tests/rules/pcmk__parse_source_test.c create mode 100644 lib/common/tests/rules/pcmk__parse_type_test.c create mode 100644 lib/common/tests/rules/pcmk__replace_submatches_test.c create mode 100644 lib/common/tests/rules/pcmk__unpack_duration_test.c create mode 100644 lib/common/tests/rules/pcmk_evaluate_rule_test.c create mode 100644 
lib/common/tests/scheduler/Makefile.am create mode 100644 lib/common/tests/scheduler/pcmk_get_dc_test.c create mode 100644 lib/common/tests/scheduler/pcmk_get_no_quorum_policy_test.c create mode 100644 lib/common/tests/scheduler/pcmk_has_quorum_test.c create mode 100644 lib/common/tests/scheduler/pcmk_set_scheduler_cib_test.c create mode 100644 lib/common/tests/schemas/Makefile.am create mode 100644 lib/common/tests/schemas/crm_schema_init_test.c create mode 100644 lib/common/tests/schemas/pcmk__build_schema_xml_node_test.c create mode 100644 lib/common/tests/schemas/pcmk__cmp_schemas_by_name_test.c create mode 100644 lib/common/tests/schemas/pcmk__find_x_0_schema_test.c create mode 100644 lib/common/tests/schemas/pcmk__get_schema_test.c create mode 100644 lib/common/tests/schemas/pcmk__schema_files_later_than_test.c delete mode 100644 lib/common/tests/strings/pcmk__char_in_any_str_test.c delete mode 100644 lib/common/tests/utils/crm_meta_name_test.c delete mode 100644 lib/common/tests/utils/crm_meta_value_test.c create mode 100644 lib/common/tests/utils/pcmk__realloc_test.c delete mode 100644 lib/common/tests/utils/pcmk_hostname_test.c create mode 100644 lib/common/tests/xml/crm_xml_init_test.c create mode 100644 lib/common/tests/xml/pcmk__xe_copy_attrs_test.c create mode 100644 lib/common/tests/xml/pcmk__xe_first_child_test.c delete mode 100644 lib/common/tests/xml/pcmk__xe_match_test.c create mode 100644 lib/common/tests/xml/pcmk__xe_set_score_test.c create mode 100644 lib/common/tests/xml/pcmk__xml_escape_test.c create mode 100644 lib/common/tests/xml/pcmk__xml_needs_escape_test.c create mode 100644 lib/common/unittest.c create mode 100644 lib/common/xml_io.c create mode 100644 lib/pacemaker/pcmk_options.c create mode 100644 lib/pacemaker/pcmk_setup.c create mode 100644 lib/pacemaker/pcmk_ticket.c create mode 100644 lib/pacemaker/pcmk_verify.c create mode 100644 lib/pacemaker/tests/Makefile.am create mode 100644 lib/pacemaker/tests/pcmk_resource/Makefile.am create mode 100644 lib/pacemaker/tests/pcmk_resource/pcmk_resource_delete_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/Makefile.am create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk__get_ticket_state_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_constraints_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_delete_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_get_attr_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_info_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_remove_attr_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_set_attr_test.c create mode 100644 lib/pacemaker/tests/pcmk_ticket/pcmk_ticket_state_test.c delete mode 100644 lib/pengine/tests/rules/Makefile.am delete mode 100644 lib/pengine/tests/rules/pe_cron_range_satisfied_test.c delete mode 100644 lib/pengine/tests/status/pe_find_node_test.c create mode 100644 xml/alerts-3.10.rng create mode 100644 xml/api/crm_attribute-2.34.rng create mode 100644 xml/api/crm_attribute-2.36.rng create mode 100644 xml/api/crm_mon-2.35.rng create mode 100644 xml/api/crm_resource-2.36.rng create mode 100644 xml/api/crm_resource-2.37.rng create mode 100644 xml/api/crm_ticket-2.35.rng create mode 100644 xml/api/ocf-ra-1.1.rng create mode 100644 xml/api/options-2.34.rng create mode 100644 xml/api/options-2.36.rng create mode 100644 xml/api/stonith_admin-2.33.rng create mode 100644 xml/api/ticket-2.35.rng delete mode 100755 xml/best-match.sh 
create mode 100644 xml/constraints-3.10.rng create mode 100644 xml/nodes-3.10.rng create mode 100644 xml/nvset-3.10.rng create mode 100644 xml/options-3.10.rng create mode 100644 xml/resources-3.10.rng create mode 100755 xml/rng-helper.in create mode 100644 xml/rule-3.10.rng delete mode 100644 xml/version-diff.sh.in diff --git a/COPYING b/COPYING index 1bf7219..7ffee16 100644 --- a/COPYING +++ b/COPYING @@ -10,5 +10,4 @@ The text of these licenses are provided in the "licenses" subdirectory. If you find any deviations from this policy, or wish to inquire about alternate licensing arrangements, please e-mail the developers@ClusterLabs.org mailing -list. Licensing issues are further discussed on the ClusterLabs wiki -(at https://wiki.clusterlabs.org/wiki/License). +list. diff --git a/ChangeLog b/ChangeLog index e5ecf98..8211238 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,465 @@ +* Wed May 15 2024 Ken Gaillot Pacemaker-2.1.8-rc1 +- 2480 commits with 507 files changed, 45891 insertions(+), 22991 deletions(-) + +- Features added since Pacemaker-2.1.7 + + CIB: deprecate omitting validate-with from the CIB or setting it to "none" + or an unknown schema + + CIB: deprecate "default" and "#default" as explicit meta-attribute values + + CIB: deprecate resource-discovery-enabled node attribute + + CIB: deprecate support for multiple top-level rules within a location + constraint (a single rule may still contain multiple sub-rules) + + CIB: deprecate support for node attribute expressions in rules beneath op, + op_defaults, or fence device meta-attributes + + CIB: deprecate support for rkt in bundles + + CIB: drop support for (nonworking) rules based on the #role node attribute + (role-based location constraints may still contain rules) + + CIB manager,controller,fencer,scheduler: deprecate "metadata" command-line + option (instead, use crm_attribute --list-options mentioned below) + + pacemaker-remoted: newer schema files are now downloaded from the cluster, + allowing more command-line tools to work when the Pacemaker Remote node + has an older Pacemaker version + + agents: deprecate the ocf:pacemaker:o2cb resource agent + + tools: crm_attribute --list-options lists all possible cluster options + + tools: crm_resource --list-options lists all possible primitive + meta-attributes or special fence device parameters + + tools: new --score option for cibadmin --modify and crm_attribute --update + enables expansion of "++" and "+=" in attribute values without a warning + (using such expansions without --score is now deprecated) + + tools: crm_ticket supports standard --output-as/--output-to arguments + + tools: deprecate --text-fancy standard command-line option + +- Fixes since Pacemaker-2.1.7 + + tools: restore the (deprecated) ability to automatically correct malformed + XML passed via standard input (regression introduced in 2.1.7) + + CIB: restore the (deprecated) ability to use validate-with="pacemaker-next" + (regression introduced in 2.1.6) + + controller: avoid zombie children when asynchronous actions exit while a + synchronous meta-data action is in progress + (regression introduced in 2.1.5) + + libcrmcommon: avoid file descriptor leak in asynchronous IPC clients + (regression introduced in 2.1.3) + + tools: crm_mon no longer crashes on some platforms when the fencer + connection is lost (regression introduced in 2.1.0) + + attribute manager: write Pacemaker Remote node attributes even if node is + not cached + + attribute manager: avoid use-after-free when remote node in cluster node + 
cache + + attribute manager: correctly propagate utilization attributes to peers + to avoid the possibility of later being written out as regular node + attributes + + fencer: correctly parse action-specific timeouts with units other than + seconds + + fencer: avoid unnecessary timeouts when the watchdog timeout is greater + than a query timeout, per-device fencing timeout, or stonith-timeout + + libcrmcommon: don't assume next schema will validate when not transforming + + libcrmcommon: when displaying XML, don't show "" for empty attribute + values, and properly escape special characters + + scheduler: if the user specifies a timeout of 0, use the default 20s as + documented + + agents: ocf:pacemaker:SysInfo respects attrd_updater dampening + + agents: ocf:pacemaker:HealthSMART properly handles SMART data missing + temperature + + tools: cibadmin --replace now leaves "++" and "+=" unexpanded in XML + attribute values rather than wrongly treat them as 0 + + tools: cibsecret avoids possible truncation issue in process listing + + tools: crm_attribute --node localhost or --node auto works + + tools: crm_resource ignores resource meta-attribute node expressions + for consistency with how the cluster works + + tools: crm_resource honors rules when getting utilization attributes + + tools: crm_verify --output-as=xml includes detailed messages + + tools: crm_mon exits upon loss of an attached pseudo-terminal to avoid + possibility of 100% CPU usage (seen when run via sudo with use_pty + configured) + +- Public API changes since Pacemaker-2.1.7 + + libcib: add cib_score_update cib_call_options value + + libcib: deprecate functions cib_get_generation(), cib_metadata(), + cib_pref(), query_node_uname(), and set_standby() + + libcib: deprecate T_CIB_DIFF_NOTIFY + + libcib: deprecate `` element in CIB create reply + + libcrmcluster: add enum pcmk_cluster_layer + + libcrmcluster: add functions pcmk_cluster_connect(), + pcmk_cluster_disconnect(), pcmk_cluster_layer_text(), + pcmk_cluster_set_destroy_fn(), pcmk_cpg_set_confchg_fn(), + pcmk_cpg_set_deliver_fn(), and pcmk_get_cluster_layer() + + libcrmcluster: add type pcmk_cluster_t + + libcrmcluster: deprecate functions cluster_connect_cpg(), + cluster_disconnect_cpg(), crm_active_peers(), crm_cluster_connect(), + crm_cluster_disconnect(), crm_get_peer(), crm_get_peer_full(), + crm_is_corosync_peer_active(), crm_is_peer_active(), crm_join_phase_str(), + crm_peer_destroy(), crm_peer_init(), crm_peer_uname(), + crm_peer_uuid(), crm_remote_node_cache_size(), + crm_remote_peer_cache_refresh(), crm_remote_peer_cache_remove(), + crm_remote_peer_get(), crm_set_autoreap(), crm_set_status_callback(), + get_cluster_type(), get_local_nodeid(), get_local_node_name(), + get_node_name(), is_corosync_cluster(), name_for_cluster_type(), + pcmk_cpg_membership(), pcmk_message_common_cs(), reap_crm_member(), + send_cluster_message(), send_cluster_text(), and text2msg_type() + + libcrmcluster: deprecate enums crm_ais_msg_types, crm_status_type, + cluster_type_e, crm_ais_msg_class, crm_get_peer_flags, crm_join_phase, + and crm_node_flags, including all their values + + libcrmcluster: deprecate global variables crm_have_quorum, crm_peer_cache, + crm_peer_seq, and crm_remote_peer_cache + + libcrmcluster: deprecate crm_cluster_t and struct crm_cluster_s, including + all its members + + libcrmcluster: deprecate crm_node_t and struct crm_peer_node_s, including + all its members + + libcrmcluster: deprecate constants CRM_NODE_LOST and CRM_NODE_MEMBER + + libcrmcommon: add 
constants PCMK_ACTION_METADATA, PCMK_META_ALLOW_MIGRATE, + PCMK_META_ALLOW_UNHEALTHY_NODES, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, + PCMK_META_CRITICAL, PCMK_META_GLOBALLY_UNIQUE, PCMK_META_INTERLEAVE, + PCMK_META_INTERVAL, PCMK_META_INTERVAL_ORIGIN, PCMK_META_IS_MANAGED, + PCMK_META_MAINTENANCE, PCMK_META_MULTIPLE_ACTIVE, PCMK_META_NOTIFY, + PCMK_META_ON_FAIL, PCMK_META_ORDERED, PCMK_META_PRIORITY, + PCMK_META_PROMOTABLE, PCMK_META_RECORD_PENDING, PCMK_META_REMOTE_ADDR, + PCMK_META_REMOTE_ALLOW_MIGRATE, PCMK_META_REMOTE_CONNECT_TIMEOUT, + PCMK_META_REMOTE_NODE, PCMK_META_REMOTE_PORT, PCMK_META_REQUIRES, + PCMK_META_RESOURCE_STICKINESS, PCMK_META_START_DELAY, + PCMK_META_TARGET_ROLE, PCMK_META_TIMEOUT, PCMK_META_TIMESTAMP_FORMAT, + PCMK_NODE_ATTR_MAINTENANCE, PCMK_NODE_ATTR_STANDBY, PCMK_OPT_BATCH_LIMIT, + PCMK_OPT_CLUSTER_DELAY, PCMK_OPT_CLUSTER_INFRASTRUCTURE, + PCMK_OPT_CLUSTER_IPC_LIMIT, PCMK_OPT_CLUSTER_NAME, + PCMK_OPT_CLUSTER_RECHECK_INTERVAL, PCMK_OPT_CONCURRENT_FENCING, + PCMK_OPT_DC_DEADTIME, PCMK_OPT_DC_VERSION, PCMK_OPT_ELECTION_TIMEOUT, + PCMK_OPT_ENABLE_ACL, PCMK_OPT_ENABLE_STARTUP_PROBES, + PCMK_OPT_FENCE_REACTION, PCMK_OPT_HAVE_WATCHDOG, + PCMK_OPT_JOIN_FINALIZATION_TIMEOUT, PCMK_OPT_JOIN_INTEGRATION_TIMEOUT, + PCMK_OPT_LOAD_THRESHOLD, PCMK_OPT_MAINTENANCE_MODE, + PCMK_OPT_MIGRATION_LIMIT, PCMK_OPT_NODE_ACTION_LIMIT, + PCMK_OPT_NODE_HEALTH_BASE, PCMK_OPT_NODE_HEALTH_GREEN, + PCMK_OPT_NODE_HEALTH_RED, PCMK_OPT_NODE_HEALTH_STRATEGY, + PCMK_OPT_NODE_HEALTH_YELLOW, PCMK_OPT_NODE_PENDING_TIMEOUT, + PCMK_OPT_NO_QUORUM_POLICY, PCMK_OPT_PE_ERROR_SERIES_MAX, + PCMK_OPT_PE_INPUT_SERIES_MAX, PCMK_OPT_PE_WARN_SERIES_MAX, + PCMK_OPT_PLACEMENT_STRATEGY, PCMK_OPT_PRIORITY_FENCING_DELAY, + PCMK_OPT_SHUTDOWN_ESCALATION, PCMK_OPT_SHUTDOWN_LOCK, + PCMK_OPT_SHUTDOWN_LOCK_LIMIT, PCMK_OPT_STARTUP_FENCING, + PCMK_OPT_START_FAILURE_IS_FATAL, PCMK_OPT_STONITH_ACTION, + PCMK_OPT_STONITH_ENABLED, PCMK_OPT_STONITH_MAX_ATTEMPTS, + PCMK_OPT_STONITH_TIMEOUT, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT, + PCMK_OPT_STOP_ALL_RESOURCES, PCMK_OPT_STOP_ORPHAN_ACTIONS, + PCMK_OPT_STOP_ORPHAN_RESOURCES, PCMK_OPT_SYMMETRIC_CLUSTER, + PCMK_OPT_TRANSITION_DELAY, PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_PORT, + PCMK_REMOTE_RA_RECONNECT_INTERVAL, PCMK_REMOTE_RA_SERVER, + PCMK_ROLE_PROMOTED, PCMK_ROLE_STARTED, PCMK_ROLE_STOPPED, + PCMK_ROLE_UNPROMOTED, PCMK_SCORE_INFINITY, PCMK_VALUE_ALWAYS, + PCMK_VALUE_AND, PCMK_VALUE_BALANCED, PCMK_VALUE_BLOCK, PCMK_VALUE_BOOLEAN, + PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, PCMK_VALUE_COROSYNC, PCMK_VALUE_CREATE, + PCMK_VALUE_CUSTOM, PCMK_VALUE_DATE_SPEC, PCMK_VALUE_DEFAULT, + PCMK_VALUE_DEFINED, PCMK_VALUE_DELETE, PCMK_VALUE_DEMOTE, PCMK_VALUE_DENY, + PCMK_VALUE_DURATION, PCMK_VALUE_DYNAMIC_LIST, PCMK_VALUE_EQ, + PCMK_VALUE_EXCLUSIVE, PCMK_VALUE_FAILED, PCMK_VALUE_FALSE, + PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY, PCMK_VALUE_FENCING, + PCMK_VALUE_FREEZE, PCMK_VALUE_GRANTED, PCMK_VALUE_GREEN, PCMK_VALUE_GT, + PCMK_VALUE_GTE, PCMK_VALUE_HOST, PCMK_VALUE_IGNORE, PCMK_VALUE_IN_RANGE, + PCMK_VALUE_INFINITY, PCMK_VALUE_INTEGER, PCMK_VALUE_LITERAL, + PCMK_VALUE_LT, PCMK_VALUE_LTE, PCMK_VALUE_MANDATORY, PCMK_VALUE_MEMBER, + PCMK_VALUE_META, PCMK_VALUE_MIGRATE_ON_RED, PCMK_VALUE_MINIMAL, + PCMK_VALUE_MINUS_INFINITY, PCMK_VALUE_MODIFY, PCMK_VALUE_MOVE, + PCMK_VALUE_NE, PCMK_VALUE_NEVER, PCMK_VALUE_NONE, + PCMK_VALUE_NONNEGATIVE_INTEGER, PCMK_VALUE_NOTHING, + PCMK_VALUE_NOT_DEFINED, PCMK_VALUE_NUMBER, PCMK_VALUE_OFFLINE, + PCMK_VALUE_ONLINE, PCMK_VALUE_ONLY_GREEN, PCMK_VALUE_OPTIONAL, + PCMK_VALUE_OR, 
PCMK_VALUE_PANIC, PCMK_VALUE_PARAM, PCMK_VALUE_PENDING, + PCMK_VALUE_PERCENTAGE, PCMK_VALUE_PLUS_INFINITY, PCMK_VALUE_PORT, + PCMK_VALUE_PROGRESSIVE, PCMK_VALUE_QUORUM, PCMK_VALUE_READ, + PCMK_VALUE_RED, PCMK_VALUE_REMOTE, PCMK_VALUE_RESTART, + PCMK_VALUE_RESTART_CONTAINER, PCMK_VALUE_REVOKED, PCMK_VALUE_SCORE, + PCMK_VALUE_SELECT, PCMK_VALUE_SERIALIZE, PCMK_VALUE_STANDBY, + PCMK_VALUE_STATIC_LIST, PCMK_VALUE_STATUS, PCMK_VALUE_STOP, + PCMK_VALUE_STOP_ONLY, PCMK_VALUE_STOP_START, PCMK_VALUE_STOP_UNEXPECTED, + PCMK_VALUE_STRING, PCMK_VALUE_SUCCESS, PCMK_VALUE_TIMEOUT, + PCMK_VALUE_TRUE, PCMK_VALUE_UNFENCING, PCMK_VALUE_UNKNOWN, + PCMK_VALUE_UTILIZATION, PCMK_VALUE_VERSION, PCMK_VALUE_WRITE, + PCMK_VALUE_YELLOW, PCMK_XA_ACTION, PCMK_XA_ACTIVE, PCMK_XA_ADD_HOST, + PCMK_XA_ADMIN_EPOCH, PCMK_XA_ADVANCED, PCMK_XA_AGENT, PCMK_XA_API_VERSION, + PCMK_XA_ATTRIBUTE, PCMK_XA_AUTHOR, PCMK_XA_AUTOMATIC, PCMK_XA_BLOCKED, + PCMK_XA_BOOLEAN_OP, PCMK_XA_BUILD, PCMK_XA_CACHED, PCMK_XA_CALL, + PCMK_XA_CIB_LAST_WRITTEN, PCMK_XA_CIB_NODE, PCMK_XA_CLASS, PCMK_XA_CLIENT, + PCMK_XA_CODE, PCMK_XA_COMMENT, PCMK_XA_COMPLETED, PCMK_XA_CONTROL_PORT, + PCMK_XA_COUNT, PCMK_XA_CRMD, PCMK_XA_CRM_DEBUG_ORIGIN, + PCMK_XA_CRM_FEATURE_SET, PCMK_XA_CRM_TIMESTAMP, PCMK_XA_DAYS, + PCMK_XA_DC_UUID, PCMK_XA_DEFAULT, PCMK_XA_DELEGATE, PCMK_XA_DESCRIPTION, + PCMK_XA_DEST, PCMK_XA_DEVICE, PCMK_XA_DEVICES, PCMK_XA_DISABLED, + PCMK_XA_DURATION, PCMK_XA_END, PCMK_XA_EPOCH, PCMK_XA_EXEC, + PCMK_XA_EXECUTION_CODE, PCMK_XA_EXECUTION_DATE, PCMK_XA_EXECUTION_MESSAGE, + PCMK_XA_EXEC_TIME, PCMK_XA_EXITCODE, PCMK_XA_EXITREASON, + PCMK_XA_EXITSTATUS, PCMK_XA_EXIT_REASON, PCMK_XA_EXPECTED, + PCMK_XA_EXPECTED_UP, PCMK_XA_EXTENDED_STATUS, PCMK_XA_FAILED, + PCMK_XA_FAILURE_IGNORED, PCMK_XA_FAIL_COUNT, PCMK_XA_FEATURES, + PCMK_XA_FEATURE_SET, PCMK_XA_FILE, PCMK_XA_FIRST, PCMK_XA_FIRST_ACTION, + PCMK_XA_FOR, PCMK_XA_FUNCTION, PCMK_XA_GENERATED, PCMK_XA_HASH, + PCMK_XA_HAVE_QUORUM, PCMK_XA_HEALTH, PCMK_XA_HOST, PCMK_XA_HOST_INTERFACE, + PCMK_XA_HOST_NETMASK, PCMK_XA_HOURS, PCMK_XA_ID, PCMK_XA_ID_AS_RESOURCE, + PCMK_XA_ID_REF, PCMK_XA_IMAGE, PCMK_XA_INDEX, PCMK_XA_INFLUENCE, + PCMK_XA_INSTANCE, PCMK_XA_INTERNAL_PORT, PCMK_XA_INTERVAL, + PCMK_XA_IP_RANGE_START, PCMK_XA_IS_DC, PCMK_XA_KIND, PCMK_XA_LANG, + PCMK_XA_LAST_FAILURE, PCMK_XA_LAST_GRANTED, PCMK_XA_LAST_RC_CHANGE, + PCMK_XA_LAST_UPDATED, PCMK_XA_LOCKED_TO, PCMK_XA_LOCKED_TO_HYPHEN, + PCMK_XA_LOSS_POLICY, PCMK_XA_MAINTENANCE, PCMK_XA_MAINTENANCE_MODE, + PCMK_XA_MANAGED, PCMK_XA_MESSAGE, PCMK_XA_MINUTES, PCMK_XA_MIXED_VERSION, + PCMK_XA_MONTHDAYS, PCMK_XA_MONTHS, PCMK_XA_MULTI_STATE, PCMK_XA_NAME, + PCMK_XA_NETWORK, PCMK_XA_NEXT_ROLE, PCMK_XA_NODE, PCMK_XA_NODEID, + PCMK_XA_NODES_RUNNING_ON, PCMK_XA_NODE_ATTRIBUTE, PCMK_XA_NODE_NAME, + PCMK_XA_NODE_PATH, PCMK_XA_NO_QUORUM_PANIC, PCMK_XA_NO_QUORUM_POLICY, + PCMK_XA_NUMBER, PCMK_XA_NUMBER_RESOURCES, PCMK_XA_NUM_UPDATES, + PCMK_XA_OBJECT_TYPE, PCMK_XA_ONLINE, PCMK_XA_ON_TARGET, PCMK_XA_OP, + PCMK_XA_OPERATION, PCMK_XA_OPTIONS, PCMK_XA_OP_KEY, PCMK_XA_ORIGIN, + PCMK_XA_ORPHAN, PCMK_XA_ORPHANED, PCMK_XA_PACEMAKERD_STATE, PCMK_XA_PATH, + PCMK_XA_PENDING, PCMK_XA_PORT, PCMK_XA_PRESENT, + PCMK_XA_PRIORITY_FENCING_DELAY_MS, PCMK_XA_PROGRAM, PCMK_XA_PROMOTABLE, + PCMK_XA_PROMOTED_MAX, PCMK_XA_PROMOTED_ONLY, PCMK_XA_PROVIDER, + PCMK_XA_QUEUED, PCMK_XA_QUEUE_TIME, PCMK_XA_QUORUM, PCMK_XA_RANGE, + PCMK_XA_RC, PCMK_XA_RC_TEXT, PCMK_XA_REASON, PCMK_XA_REFERENCE, + PCMK_XA_RELOADABLE, PCMK_XA_REMAIN_STOPPED, PCMK_XA_REMOTE_CLEAR_PORT, + PCMK_XA_REMOTE_NODE, 
PCMK_XA_REMOTE_TLS_PORT, PCMK_XA_REPLICAS, + PCMK_XA_REPLICAS_PER_HOST, PCMK_XA_REQUEST, PCMK_XA_REQUIRE_ALL, + PCMK_XA_RESOURCE, PCMK_XA_RESOURCES_RUNNING, PCMK_XA_RESOURCE_AGENT, + PCMK_XA_RESOURCE_DISCOVERY, PCMK_XA_RESULT, PCMK_XA_ROLE, PCMK_XA_RSC, + PCMK_XA_RSC_PATTERN, PCMK_XA_RSC_ROLE, PCMK_XA_RULE_ID, PCMK_XA_RUNNING, + PCMK_XA_RUNNING_ON, PCMK_XA_RUN_COMMAND, PCMK_XA_SCOPE, PCMK_XA_SCORE, + PCMK_XA_SCORE_ATTRIBUTE, PCMK_XA_SECONDS, PCMK_XA_SEQUENTIAL, + PCMK_XA_SHUTDOWN, PCMK_XA_SOURCE, PCMK_XA_SOURCE_DIR, + PCMK_XA_SOURCE_DIR_ROOT, PCMK_XA_SPEC, PCMK_XA_STANDARD, PCMK_XA_STANDBY, + PCMK_XA_STANDBY_ONFAIL, PCMK_XA_START, PCMK_XA_STATE, PCMK_XA_STATUS, + PCMK_XA_STONITH_ENABLED, PCMK_XA_STONITH_TIMEOUT_MS, + PCMK_XA_STOP_ALL_RESOURCES, PCMK_XA_SYMMETRICAL, + PCMK_XA_SYMMETRIC_CLUSTER, PCMK_XA_SYS_FROM, PCMK_XA_TAG, PCMK_XA_TARGET, + PCMK_XA_TARGET_ATTRIBUTE, PCMK_XA_TARGET_DIR, PCMK_XA_TARGET_PATTERN, + PCMK_XA_TARGET_ROLE, PCMK_XA_TARGET_VALUE, PCMK_XA_TASK, PCMK_XA_TEMPLATE, + PCMK_XA_THEN, PCMK_XA_THEN_ACTION, PCMK_XA_TICKET, PCMK_XA_TIME, + PCMK_XA_TYPE, PCMK_XA_UNAME, PCMK_XA_UNCLEAN, PCMK_XA_UNHEALTHY, + PCMK_XA_UNIQUE, PCMK_XA_UNMANAGED, PCMK_XA_UPDATE_CLIENT, + PCMK_XA_UPDATE_ORIGIN, PCMK_XA_UPDATE_USER, PCMK_XA_USER, PCMK_XA_VALID, + PCMK_XA_VALIDATE_WITH, PCMK_XA_VALUE, PCMK_XA_VALUE_SOURCE, + PCMK_XA_VERSION, PCMK_XA_WATCHDOG, PCMK_XA_WEEKDAYS, PCMK_XA_WEEKS, + PCMK_XA_WEEKYEARS, PCMK_XA_WEIGHT, PCMK_XA_WHEN, PCMK_XA_WITH_QUORUM, + PCMK_XA_WITH_RSC, PCMK_XA_WITH_RSC_ROLE, PCMK_XA_XPATH, PCMK_XA_YEARDAYS, + PCMK_XA_YEARS, PCMK_XE_ACLS, PCMK_XE_ACL_GROUP, PCMK_XE_ACL_PERMISSION, + PCMK_XE_ACL_ROLE, PCMK_XE_ACL_TARGET, PCMK_XE_ACTION, PCMK_XE_ACTIONS, + PCMK_XE_AGENT, PCMK_XE_AGENTS, PCMK_XE_AGENT_STATUS, PCMK_XE_ALERT, + PCMK_XE_ALERTS, PCMK_XE_ALLOCATIONS, PCMK_XE_ALLOCATIONS_UTILIZATIONS, + PCMK_XE_ATTRIBUTE, PCMK_XE_BAN, PCMK_XE_BANS, PCMK_XE_BUNDLE, + PCMK_XE_CAPACITY, PCMK_XE_CHANGE, PCMK_XE_CHANGE_ATTR, + PCMK_XE_CHANGE_LIST, PCMK_XE_CHANGE_RESULT, PCMK_XE_CHECK, PCMK_XE_CIB, + PCMK_XE_CLONE, PCMK_XE_CLUSTER_ACTION, PCMK_XE_CLUSTER_INFO, + PCMK_XE_CLUSTER_OPTIONS, PCMK_XE_CLUSTER_PROPERTY_SET, + PCMK_XE_CLUSTER_STATUS, PCMK_XE_COMMAND, PCMK_XE_CONFIGURATION, + PCMK_XE_CONSTRAINTS, PCMK_XE_CONTENT, PCMK_XE_CRM_CONFIG, PCMK_XE_CRM_MON, + PCMK_XE_CRM_MON_DISCONNECTED, PCMK_XE_CURRENT_DC, PCMK_XE_DATE_SPEC, + PCMK_XE_DC, PCMK_XE_DEPRECATED, PCMK_XE_DIFF, PCMK_XE_DIGEST, + PCMK_XE_DIGESTS, PCMK_XE_DOCKER, PCMK_XE_DURATION, PCMK_XE_ERROR, + PCMK_XE_ERRORS, PCMK_XE_EXPRESSION, PCMK_XE_FAILURE, PCMK_XE_FAILURES, + PCMK_XE_FEATURE, PCMK_XE_FEATURES, PCMK_XE_FENCE_EVENT, + PCMK_XE_FENCE_HISTORY, PCMK_XE_FENCING_ACTION, PCMK_XE_FENCING_LEVEL, + PCMK_XE_FENCING_TOPOLOGY, PCMK_XE_GROUP, PCMK_XE_INJECT_ATTR, + PCMK_XE_INJECT_SPEC, PCMK_XE_INSTANCE_ATTRIBUTES, PCMK_XE_INSTRUCTION, + PCMK_XE_ITEM, PCMK_XE_LAST_CHANGE, PCMK_XE_LAST_FENCED, + PCMK_XE_LAST_UPDATE, PCMK_XE_LIST, PCMK_XE_LONGDESC, PCMK_XE_METADATA, + PCMK_XE_META_ATTRIBUTES, PCMK_XE_MODIFICATIONS, PCMK_XE_MODIFY_NODE, + PCMK_XE_MODIFY_TICKET, PCMK_XE_NETWORK, PCMK_XE_NODE, PCMK_XE_NODES, + PCMK_XE_NODES_CONFIGURED, PCMK_XE_NODE_ACTION, PCMK_XE_NODE_ATTRIBUTES, + PCMK_XE_NODE_HISTORY, PCMK_XE_NODE_INFO, PCMK_XE_NODE_WEIGHT, + PCMK_XE_NVPAIR, PCMK_XE_OBJ_REF, PCMK_XE_OP, PCMK_XE_OPERATION, + PCMK_XE_OPERATIONS, PCMK_XE_OPERATION_HISTORY, PCMK_XE_OPTION, + PCMK_XE_OP_DEFAULTS, PCMK_XE_OUTPUT, PCMK_XE_OVERRIDE, PCMK_XE_OVERRIDES, + PCMK_XE_PACEMAKERD, PCMK_XE_PACEMAKER_RESULT, PCMK_XE_PARAMETER, + PCMK_XE_PARAMETERS, 
PCMK_XE_PODMAN, PCMK_XE_PORT_MAPPING, + PCMK_XE_POSITION, PCMK_XE_PRIMITIVE, PCMK_XE_PROMOTION_SCORE, + PCMK_XE_PROVIDER, PCMK_XE_PROVIDERS, PCMK_XE_PSEUDO_ACTION, + PCMK_XE_REASON, PCMK_XE_RECIPIENT, PCMK_XE_REPLICA, PCMK_XE_RESOURCE, + PCMK_XE_RESOURCES, PCMK_XE_RESOURCES_CONFIGURED, PCMK_XE_RESOURCE_AGENT, + PCMK_XE_RESOURCE_AGENT_ACTION, PCMK_XE_RESOURCE_CONFIG, + PCMK_XE_RESOURCE_HISTORY, PCMK_XE_RESOURCE_REF, PCMK_XE_RESOURCE_SET, + PCMK_XE_RESULT_CODE, PCMK_XE_REVISED_CLUSTER_STATUS, + PCMK_XE_ROLE, PCMK_XE_RSC_ACTION, PCMK_XE_RSC_COLOCATION, + PCMK_XE_RSC_DEFAULTS, PCMK_XE_RSC_LOCATION, PCMK_XE_RSC_ORDER, + PCMK_XE_RSC_TICKET, PCMK_XE_RULE, PCMK_XE_RULE_CHECK, PCMK_XE_SELECT, + PCMK_XE_SELECT_ATTRIBUTES, PCMK_XE_SELECT_FENCING, PCMK_XE_SELECT_NODES, + PCMK_XE_SELECT_RESOURCES, PCMK_XE_SHADOW, PCMK_XE_SHORTDESC, + PCMK_XE_SOURCE, PCMK_XE_SPECIAL, PCMK_XE_STACK, PCMK_XE_STATUS, + PCMK_XE_STORAGE, PCMK_XE_STORAGE_MAPPING, PCMK_XE_SUMMARY, PCMK_XE_TAG, + PCMK_XE_TAGS, PCMK_XE_TARGET, PCMK_XE_TEMPLATE, PCMK_XE_TICKET, + PCMK_XE_TICKETS, PCMK_XE_TIMING, PCMK_XE_TIMINGS, PCMK_XE_TRANSITION, + PCMK_XE_UTILIZATION, PCMK_XE_UTILIZATIONS, PCMK_XE_VALIDATE, + PCMK_XE_VERSION, PCMK_XE_XML, PCMK_XE_XML_PATCHSET, and + XML_CIB_TAG_ALERT_ATTRIBUTES + + libcrmcommon: add functions pcmk_action_text(), pcmk_find_node(), + pcmk_foreach_active_resource(), pcmk_get_dc(), + pcmk_get_no_quorum_policy(), pcmk_has_quorum(), + pcmk_node_is_clean(), pcmk_node_is_in_maintenance(), + pcmk_node_is_online(), pcmk_node_is_pending(), + pcmk_node_is_shutting_down(), pcmk_on_fail_text(), + pcmk_parse_action(), pcmk_parse_interval_spec(), pcmk_parse_role(), + pcmk_resource_id(), pcmk_resource_is_managed(), pcmk_role_text(), + pcmk_set_scheduler_cib(), and pcmk_update_configured_schema() + + libcrmcommon: add type pcmk_rule_input_t + + libcrmcommon: deprecate globals crm_log_level, crm_trace_nonlog, + was_processing_error, and was_processing_warning + + libcrmcommon: deprecate functions add_message_xml(), add_node_copy(), + can_prune_leaf(), cli_config_update(), copy_in_properties(), copy_xml(), + create_hello_message(), create_reply(), + create_reply_adv(), create_request(), create_request_adv(), + create_xml_node(), crm_map_element_name(), crm_next_same_xml(), + crm_parse_interval_spec(), crm_xml_escape(), diff_xml_object(), + dump_xml_formatted(), dump_xml_formatted_with_text(), + dump_xml_unformatted(), expand_plus_plus(), filename2xml(), + find_xml_children(), find_xml_node(), first_named_child(), + fix_plus_plus_recursive(), get_message_xml(), get_schema_name(), + get_schema_version(), get_xpath_object_relative(), ID(), + pcmk_create_html_node(), pcmk_create_xml_text_node(), + pcmk_hostname(), purge_diff_markers(), + replace_xml_child(), stdin2xml(), string2xml(), subtract_xml_object(), + update_validation(), update_xml_child(), validate_xml(), + validate_xml_verbose(), write_xml_fd(), write_xml_file(), + xml_latest_schema(), and xml_remove_prop() + + libcrmcommon: deprecate constants CIB_OPTIONS_FIRST, CRM_INFINITY_S, + CRM_MINUS_INFINITY_S, CRM_OP_LOCAL_SHUTDOWN, CRM_PLUS_INFINITY_S, + CRM_SCORE_INFINITY, F_CLIENTNAME, F_CRM_DATA, F_CRM_DC_LEAVING, + F_CRM_ELECTION_AGE_S, F_CRM_ELECTION_AGE_US, F_CRM_ELECTION_ID, + F_CRM_ELECTION_OWNER, F_CRM_HOST_FROM, F_CRM_HOST_TO, F_CRM_JOIN_ID, + F_CRM_MSG_TYPE, F_CRM_ORIGIN, F_CRM_REFERENCE, F_CRM_SYS_FROM, + F_CRM_SYS_TO, F_CRM_TASK, F_CRM_TGRAPH, F_CRM_TGRAPH_INPUT, + F_CRM_THROTTLE_MAX, F_CRM_THROTTLE_MODE,
F_CRM_USER, F_CRM_VERSION, + F_ORIG, F_SEQ, F_SUBTYPE, F_TYPE, F_XML_TAGNAME, INFINITY, INFINITY_S, + MINUS_INFINITY_S, OFFLINESTATUS, ONLINESTATUS, + PCMK_XA_PROMOTED_MAX_LEGACY, PCMK_XA_PROMOTED_NODE_MAX_LEGACY, + PCMK_XE_PROMOTABLE_LEGACY, + SUPPORT_UPSTART, T_ATTRD, T_CRM, XML_ACL_ATTR_ATTRIBUTE, + XML_ACL_ATTR_KIND, XML_ACL_ATTR_REF, XML_ACL_ATTR_REFv1, XML_ACL_ATTR_TAG, + XML_ACL_ATTR_TAGv1, XML_ACL_ATTR_XPATH, XML_ACL_TAG_DENY, + XML_ACL_TAG_GROUP, XML_ACL_TAG_PERMISSION, XML_ACL_TAG_READ, + XML_ACL_TAG_ROLE, XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1, + XML_ACL_TAG_USER, XML_ACL_TAG_USERv1, XML_ACL_TAG_WRITE, + XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER, XML_ALERT_ATTR_PATH, + XML_ALERT_ATTR_REC_VALUE, XML_ALERT_ATTR_TIMEOUT, + XML_ALERT_ATTR_TSTAMP_FORMAT, XML_ATTR_CRM_VERSION, XML_ATTR_DC_UUID, + XML_ATTR_DESC, XML_ATTR_DIGEST, XML_ATTR_GENERATION, + XML_ATTR_GENERATION_ADMIN, XML_ATTR_HAVE_QUORUM, XML_ATTR_HAVE_WATCHDOG, + XML_ATTR_ID, XML_ATTR_IDREF, XML_ATTR_ID_LONG, XML_ATTR_NAME, + XML_ATTR_NUMUPDATES, XML_ATTR_OP, XML_ATTR_ORIGIN, XML_ATTR_QUORUM_PANIC, + XML_ATTR_REFERENCE, XML_ATTR_REQUEST, XML_ATTR_RESPONSE, + XML_ATTR_STONITH_DEVICES, XML_ATTR_STONITH_INDEX, XML_ATTR_STONITH_TARGET, + XML_ATTR_STONITH_TARGET_ATTRIBUTE, XML_ATTR_STONITH_TARGET_VALUE, + XML_ATTR_TE_NOWAIT, XML_ATTR_TE_TARGET_RC, XML_ATTR_TIMEOUT, + XML_ATTR_TRANSITION_KEY, XML_ATTR_TRANSITION_MAGIC, XML_ATTR_TSTAMP, + XML_ATTR_TYPE, XML_ATTR_UPDATE_CLIENT, XML_ATTR_UPDATE_ORIGIN, + XML_ATTR_UPDATE_USER, XML_ATTR_VALIDATION, XML_ATTR_VERSION, + XML_BOOLEAN_FALSE, XML_BOOLEAN_NO, XML_BOOLEAN_TRUE, XML_BOOLEAN_YES, + XML_CIB_ATTR_PRIORITY, XML_CIB_ATTR_SHUTDOWN, XML_CIB_ATTR_WRITTEN, + XML_CIB_TAG_ACLS, XML_CIB_TAG_ALERT, XML_CIB_TAG_ALERTS, + XML_CIB_TAG_ALERT_ATTR, XML_CIB_TAG_ALERT_FENCING, + XML_CIB_TAG_ALERT_NODES, XML_CIB_TAG_ALERT_RECIPIENT, + XML_CIB_TAG_ALERT_RESOURCES, XML_CIB_TAG_ALERT_SELECT, + XML_CIB_TAG_CONFIGURATION, XML_CIB_TAG_CONSTRAINTS, XML_CIB_TAG_CONTAINER, + XML_CIB_TAG_CRMCONFIG, XML_CIB_TAG_GENERATION_TUPPLE, XML_CIB_TAG_GROUP, + XML_CIB_TAG_INCARNATION, XML_CIB_TAG_LRM, XML_CIB_TAG_NODE, + XML_CIB_TAG_NODES, XML_CIB_TAG_NVPAIR, XML_CIB_TAG_OBJ_REF, + XML_CIB_TAG_OPCONFIG, XML_CIB_TAG_PROPSET, XML_CIB_TAG_RESOURCE, + XML_CIB_TAG_RESOURCES, XML_CIB_TAG_RSCCONFIG, XML_CIB_TAG_RSC_TEMPLATE, + XML_CIB_TAG_SECTION_ALL, XML_CIB_TAG_STATE, XML_CIB_TAG_STATUS, + XML_CIB_TAG_TAG, XML_CIB_TAG_TAGS, XML_CIB_TAG_TICKETS, + XML_CIB_TAG_TICKET_STATE, XML_COLOC_ATTR_INFLUENCE, + XML_COLOC_ATTR_NODE_ATTR, XML_COLOC_ATTR_SOURCE, + XML_COLOC_ATTR_SOURCE_INSTANCE, XML_COLOC_ATTR_SOURCE_ROLE, + XML_COLOC_ATTR_TARGET, XML_COLOC_ATTR_TARGET_INSTANCE, + XML_COLOC_ATTR_TARGET_ROLE, XML_CONFIG_ATTR_DC_DEADTIME, + XML_CONFIG_ATTR_ELECTION_FAIL, XML_CONFIG_ATTR_FENCE_REACTION, + XML_CONFIG_ATTR_FORCE_QUIT, XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, + XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, XML_CONFIG_ATTR_RECHECK, + XML_CONFIG_ATTR_SHUTDOWN_LOCK, XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, + XML_CONS_ATTR_SYMMETRICAL, XML_CONS_TAG_RSC_DEPEND, + XML_CONS_TAG_RSC_LOCATION, XML_CONS_TAG_RSC_ORDER, XML_CONS_TAG_RSC_SET, + XML_CONS_TAG_RSC_TICKET, XML_CRM_TAG_PING, XML_DIFF_ATTR, XML_DIFF_CHANGE, + XML_DIFF_LIST, XML_DIFF_MARKER, XML_DIFF_OP, XML_DIFF_PATH, + XML_DIFF_POSITION, XML_DIFF_RESULT, XML_DIFF_VERSION, XML_DIFF_VSOURCE, + XML_DIFF_VTARGET, XML_EXPR_ATTR_ATTRIBUTE, XML_EXPR_ATTR_OPERATION, + XML_EXPR_ATTR_TYPE, XML_EXPR_ATTR_VALUE, XML_EXPR_ATTR_VALUE_SOURCE,
XML_FAILCIB_ATTR_ID, XML_FAILCIB_ATTR_OBJTYPE, XML_FAILCIB_ATTR_OP, + XML_FAILCIB_ATTR_REASON, XML_FAIL_TAG_CIB, XML_GRAPH_TAG_CRM_EVENT, + XML_GRAPH_TAG_DOWNED, XML_GRAPH_TAG_MAINTENANCE, + XML_GRAPH_TAG_PSEUDO_EVENT, XML_GRAPH_TAG_RSC_OP, + XML_LOCATION_ATTR_DISCOVERY, XML_LOC_ATTR_SOURCE, + XML_LOC_ATTR_SOURCE_PATTERN, XML_LRM_ATTR_CALLID, + XML_LRM_ATTR_EXIT_REASON, XML_LRM_ATTR_INTERVAL, XML_LRM_ATTR_INTERVAL_MS, + XML_LRM_ATTR_MIGRATE_SOURCE, XML_LRM_ATTR_MIGRATE_TARGET, + XML_LRM_ATTR_OPSTATUS, XML_LRM_ATTR_OP_DIGEST, XML_LRM_ATTR_OP_RESTART, + XML_LRM_ATTR_OP_SECURE, XML_LRM_ATTR_RC, XML_LRM_ATTR_RESTART_DIGEST, + XML_LRM_ATTR_ROUTER_NODE, XML_LRM_ATTR_RSCID, XML_LRM_ATTR_SECURE_DIGEST, + XML_LRM_ATTR_TARGET, XML_LRM_ATTR_TARGET_UUID, XML_LRM_ATTR_TASK, + XML_LRM_ATTR_TASK_KEY, XML_LRM_TAG_RESOURCE, XML_LRM_TAG_RESOURCES, + XML_LRM_TAG_RSC_OP, XML_NODE_ATTR_RSC_DISCOVERY, XML_NODE_IS_FENCED, + XML_NODE_IS_MAINTENANCE, XML_NODE_IS_REMOTE, XML_NVPAIR_ATTR_NAME, + XML_NVPAIR_ATTR_VALUE, XML_OP_ATTR_ALLOW_MIGRATE, XML_OP_ATTR_DIGESTS_ALL, + XML_OP_ATTR_DIGESTS_SECURE, XML_OP_ATTR_INTERVAL_ORIGIN, + XML_OP_ATTR_ON_FAIL, XML_OP_ATTR_PENDING, XML_OP_ATTR_START_DELAY, + XML_ORDER_ATTR_FIRST, XML_ORDER_ATTR_FIRST_ACTION, XML_ORDER_ATTR_KIND, + XML_ORDER_ATTR_THEN, XML_ORDER_ATTR_THEN_ACTION, XML_PING_ATTR_CRMDSTATE, + XML_PING_ATTR_PACEMAKERDSTATE, XML_PING_ATTR_PACEMAKERDSTATE_INIT, + XML_PING_ATTR_PACEMAKERDSTATE_REMOTE, + XML_PING_ATTR_PACEMAKERDSTATE_RUNNING, + XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE, + XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN, + XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS, + XML_PING_ATTR_PACEMAKERDSTATE_WAITPING, XML_PING_ATTR_STATUS, + XML_PING_ATTR_SYSFROM, XML_REMOTE_ATTR_RECONNECT_INTERVAL, + XML_RSC_ATTR_CLEAR_INTERVAL, XML_RSC_ATTR_CLEAR_OP, + XML_RSC_ATTR_CONTAINER, XML_RSC_ATTR_CRITICAL, XML_RSC_ATTR_INCARNATION, + XML_RSC_ATTR_INTERLEAVE, XML_RSC_ATTR_INTERNAL_RSC, + XML_RSC_ATTR_MAINTENANCE, XML_RSC_ATTR_MANAGED, XML_RSC_ATTR_MULTIPLE, + XML_RSC_ATTR_NOTIFY, XML_RSC_ATTR_ORDERED, XML_RSC_ATTR_PROMOTABLE, + XML_RSC_ATTR_REMOTE_NODE, XML_RSC_ATTR_REMOTE_RA_ADDR, + XML_RSC_ATTR_REMOTE_RA_PORT, XML_RSC_ATTR_REMOTE_RA_SERVER, + XML_RSC_ATTR_REQUIRES, XML_RSC_ATTR_RESTART, XML_RSC_ATTR_STICKINESS, + XML_RSC_ATTR_TARGET, XML_RSC_ATTR_TARGET_ROLE, XML_RSC_ATTR_UNIQUE, + XML_RSC_OP_LAST_CHANGE, XML_RSC_OP_T_EXEC, XML_RSC_OP_T_QUEUE, + XML_RULE_ATTR_BOOLEAN_OP, XML_RULE_ATTR_ROLE, XML_RULE_ATTR_SCORE, + XML_RULE_ATTR_SCORE_ATTRIBUTE, XML_TAG_ATTRS, XML_TAG_ATTR_SETS, + XML_TAG_CIB, XML_TAG_DIFF, XML_TAG_EXPRESSION, XML_TAG_FAILED, + XML_TAG_FENCING_LEVEL, XML_TAG_FENCING_TOPOLOGY, XML_TAG_GRAPH, + XML_TAG_META_SETS, XML_TAG_OPTIONS, XML_TAG_PARAM, XML_TAG_PARAMS, + XML_TAG_RESOURCE_REF, XML_TAG_RULE, XML_TAG_TRANSIENT_NODEATTRS, + XML_TAG_UTILIZATION, XML_TICKET_ATTR_LOSS_POLICY, and + XML_TICKET_ATTR_TICKET + + libcrmcommon: deprecate direct access to all members of pcmk_scheduler_t, + pcmk_tag_t, and pcmk_ticket_t + + libcrmcommon: deprecate pcmk_assignment_methods_t, pcmk_rsc_methods_t, + struct pe_action_s, struct pe_node_s, struct pe_node_shared_s, + struct pe_resource_s, struct resource_alloc_functions_s, and + struct resource_object_functions_s, including all their members + + libcrmcommon: deprecate enums action_fail_response, action_tasks, + expression_type, node_type, pcmk_rsc_flags, pcmk_scheduler_flags, + pe_action_flags, pe_discover_e, pe_obj_types, rsc_recovery_type, and + rsc_start_requirement, including all their values + + liblrmd: 
deprecate constants F_LRMD_ALERT, F_LRMD_ALERT_ID, + F_LRMD_ALERT_PATH, F_LRMD_CALLBACK_TOKEN, F_LRMD_CALLDATA, F_LRMD_CALLID, + F_LRMD_CALLOPTS, F_LRMD_CLASS, F_LRMD_CLIENTID, F_LRMD_CLIENTNAME, + F_LRMD_EXEC_RC, F_LRMD_IPC_CLIENT, F_LRMD_IPC_IPC_SERVER, F_LRMD_IPC_MSG, + F_LRMD_IPC_MSG_FLAGS, F_LRMD_IPC_MSG_ID, F_LRMD_IPC_OP, F_LRMD_IPC_USER, + F_LRMD_IS_IPC_PROVIDER, F_LRMD_OPERATION, F_LRMD_OP_STATUS, F_LRMD_ORIGIN, + F_LRMD_PROTOCOL_VERSION, F_LRMD_PROVIDER, F_LRMD_RC, F_LRMD_REMOTE_MSG_ID, + F_LRMD_REMOTE_MSG_TYPE, F_LRMD_RSC, F_LRMD_RSC_ACTION, F_LRMD_RSC_DELETED, + F_LRMD_RSC_EXEC_TIME, F_LRMD_RSC_EXIT_REASON, F_LRMD_RSC_ID, + F_LRMD_RSC_INTERVAL, F_LRMD_RSC_OUTPUT, F_LRMD_RSC_QUEUE_TIME, + F_LRMD_RSC_RCCHANGE_TIME, F_LRMD_RSC_RUN_TIME, F_LRMD_RSC_START_DELAY, + F_LRMD_RSC_USERDATA_STR, F_LRMD_TIMEOUT, F_LRMD_TYPE, F_LRMD_WATCHDOG, + T_LRMD, T_LRMD_IPC_PROXY, T_LRMD_NOTIFY, T_LRMD_REPLY, and T_LRMD_RSC_OP + + libpacemaker: distribute pacemaker.h header to allow high-level API usage + + libpe_rules: deprecate functions find_expression_type(), + pe_evaluate_rules(), pe_eval_expr(), pe_eval_rules(), pe_eval_subexpr(), + pe_expand_re_matches(), pe_test_expression(), and pe_test_rule() + + libpe_rules,libpe_status: move enum expression_type and globals + was_processing_error and was_processing_warning to libcrmcommon + + libpe_rules,libpe_status: deprecate role member of pe_op_eval_data + + libpe_rules,libpe_status: deprecate functions fail2text(), recovery2text(), + role2text(), task2text(), text2role(), and text2task() + + libpe_status: deprecate functions pe_find_node(), pe_pref(), + pe_rsc_is_anon_clone(), pe_rsc_is_bundled(), pe_rsc_is_clone(), and + pe_rsc_is_unique_clone() + + libpe_status: deprecate global resource_class_functions + + libstonithd: deprecate constants T_STONITH_NOTIFY_DISCONNECT, + T_STONITH_NOTIFY_FENCE, T_STONITH_NOTIFY_HISTORY, and + T_STONITH_NOTIFY_HISTORY_SYNCED + * Tue Dec 19 2023 Ken Gaillot Pacemaker-2.1.7 - 1388 commits with 358 files changed, 23771 insertions(+), 17219 deletions(-) @@ -1606,7 +2068,7 @@ + resources: drop broken configdir parameter from ocf:pacemaker:controld - For further details, see: - https://wiki.clusterlabs.org/wiki/Pacemaker_2.0_Changes + https://projects.clusterlabs.org/w/projects/pacemaker/pacemaker_2.0_changes/ * Tue Nov 14 2017 Ken Gaillot Pacemaker-1.1.18 diff --git a/GNUmakefile b/GNUmakefile index 8cac498..21a46c6 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -34,8 +34,8 @@ build: init # Pass option depending on whether automake has been run or not USE_FILE = $(shell test -e rpm/Makefile || echo "-f Makefile.am") -.PHONY: $(PACKAGE).spec chroot dirty export mock rc release rpm rpmlint srpm -$(PACKAGE).spec chroot dirty export mock rc release rpm rpmlint srpm: +.PHONY: $(PACKAGE).spec chroot dirty export mock rc release rpm srpm +$(PACKAGE).spec chroot dirty export mock rc release rpm srpm: $(MAKE) $(AM_MAKEFLAGS) -C rpm $(USE_FILE) "$@" mock-% rpm-% spec-% srpm-%: FORCE diff --git a/INSTALL.md b/INSTALL.md index e03c594..9819aca 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -57,22 +57,23 @@ Also: | RPM packages via "make rpm" | 4.11 or later | rpm | rpm | (n/a) | | unit tests | 1.1.0 or later | libcmocka-devel | libcmocka-devel | libcmocka-dev | -## Optional testing dependencies -* procps and psmisc (if running cts-exec, cts-fencing, or CTS) -* valgrind (if running CTS valgrind tests) -* python3-systemd (if using CTS on cluster nodes running systemd) +## Optional Testing Dependencies +* procps and psmisc (if running cts-exec, 
cts-fencing, or CTS lab) +* valgrind (if running valgrind tests in cts-cli, cts-scheduler, or CTS lab) +* python3-dateutil and python3-systemd (if running CTS lab on cluster nodes + running systemd) * nmap (if not specifying an IP address base) -* oprofile (if running CTS profiling tests) -* dlm (to log DLM debugging info after CTS tests) +* oprofile (if running CTS lab profiling tests) +* dlm (to log DLM debugging info after CTS lab tests) * xmllint (to validate tool output in cts-cli) -## Simple install +## Simple Install $ make && sudo make install If GNU make is not your default make, use "gmake" instead. -## Detailed install +## Detailed Install First, browse the build options that are available: diff --git a/Makefile.am b/Makefile.am index c3e39b9..b2d8d55 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2003-2023 the Pacemaker project contributors +# Copyright 2003-2024 the Pacemaker project contributors # # The version control history for this file may have further details. # diff --git a/README.markdown b/README.markdown index 4342875..d74f23a 100644 --- a/README.markdown +++ b/README.markdown @@ -72,5 +72,6 @@ See [CONTRIBUTING.md](https://github.com/ClusterLabs/pacemaker/blob/main/CONTRIB * [ClusterLabs website](https://www.clusterlabs.org/) * [Documentation](https://www.clusterlabs.org/pacemaker/doc/) * [Issues/Bugs](https://bugs.clusterlabs.org/) -* [Mailing lists](https://wiki.clusterlabs.org/wiki/Mailing_lists) for users and developers -* [ClusterLabs IRC channel](https://wiki.clusterlabs.org/wiki/ClusterLabs_IRC_channel) +* [Mailing lists](https://projects.clusterlabs.org/w/clusterlabs/clusterlabs_mailing_lists/) + for users and developers +* [ClusterLabs IRC channel](https://projects.clusterlabs.org/w/clusterlabs/clusterlabs_irc_channel/) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..a008885 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +## Supported Versions + +Pacemaker's 2.1 and 3.0 release series are actively developed and receive +security fixes. + +## Reporting a Vulnerability + +If you have a support contract with an operating system vendor such as Red Hat +or SUSE, please submit potentially security-related reports via the vendor's +usual method. Otherwise, please submit a report via: + + https://github.com/ClusterLabs/pacemaker/security + +## Past Vulnerabilities + +See https://projects.clusterlabs.org/w/cluster_administration/cves/ diff --git a/agents/ocf/HealthCPU.in b/agents/ocf/HealthCPU.in index 14e4b07..1a691a9 100755 --- a/agents/ocf/HealthCPU.in +++ b/agents/ocf/HealthCPU.in @@ -26,6 +26,7 @@ : ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"} . "${OCF_FUNCTIONS}" : ${__OCF_ACTION:="$1"} +: ${OCF_RESKEY_dampening:="30s"} ####################################################################### @@ -196,9 +197,6 @@ fi if [ -z "${OCF_RESKEY_yellow_limit}" ] ; then OCF_RESKEY_yellow_limit=50 fi -if [ -z "${OCF_RESKEY_dampening}" ]; then - OCF_RESKEY_dampening="30s" -fi case "$__OCF_ACTION" in meta-data) meta_data diff --git a/agents/ocf/HealthIOWait.in b/agents/ocf/HealthIOWait.in index ba7a17a..65ce901 100755 --- a/agents/ocf/HealthIOWait.in +++ b/agents/ocf/HealthIOWait.in @@ -19,6 +19,7 @@ : ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"} . 
"${OCF_FUNCTIONS}" : ${__OCF_ACTION:="$1"} +: ${OCF_RESKEY_dampening:="5s"} ####################################################################### @@ -60,12 +61,22 @@ the #health-iowait will go red if the %iowait of the CPU get higher than 15%. + + +The time to wait (dampening) in seconds for further changes before writing + +The time to wait (dampening) in seconds for further changes +before writing + + + + @@ -77,7 +88,7 @@ END agent_usage() { cat < -Lower limit of the temperature in deg C of the drive(s). Below this limit the status will be red. +Lower limit of the temperature in deg C of the drive(s). Below this limit the status of #health-smart will be red. Lower limit for the red smart attribute @@ -116,7 +116,7 @@ The path to the smartctl program, used for querying device health. The time to wait (dampening) for further changes to occur Dampening interval - + @@ -137,28 +137,30 @@ END check_temperature() { - if [ $1 -lt ${lower_red_limit} ] ; then - ocf_log info "Drive ${DRIVE} ${DEVICE} too cold: ${1} C" - attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" - return 1 - fi + if [ -n "$1" ]; then + if [ $1 -lt ${lower_red_limit} ] ; then + ocf_log info "Drive ${DRIVE} ${DEVICE} too cold: ${1} C" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" + return 1 + fi - if [ $1 -gt ${upper_red_limit} ] ; then - ocf_log info "Drive ${DRIVE} ${DEVICE} too hot: ${1} C" - attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" - return 1 - fi + if [ $1 -gt ${upper_red_limit} ] ; then + ocf_log info "Drive ${DRIVE} ${DEVICE} too hot: ${1} C" + attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}" + return 1 + fi - if [ $1 -lt ${lower_yellow_limit} ] ; then - ocf_log info "Drive ${DRIVE} ${DEVICE} quite cold: ${1} C" - attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" - return 1 - fi + if [ $1 -lt ${lower_yellow_limit} ] ; then + ocf_log info "Drive ${DRIVE} ${DEVICE} quite cold: ${1} C" + attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" + return 1 + fi - if [ $1 -gt ${upper_yellow_limit} ] ; then - ocf_log info "Drive ${DRIVE} ${DEVICE} quite hot: ${1} C" - attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" - return 1 + if [ $1 -gt ${upper_yellow_limit} ] ; then + ocf_log info "Drive ${DRIVE} ${DEVICE} quite hot: ${1} C" + attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}" + return 1 + fi fi } @@ -350,7 +352,7 @@ fi case "$__OCF_ACTION" in start) HealthSMART_start;; stop) HealthSMART_stop;; - monitor) HealthSMART_monitor;; + monitor) HealthSMART_validate && HealthSMART_monitor;; validate-all) HealthSMART_validate;; reload-agent) HealthSMART_reload_agent;; meta-data) diff --git a/agents/ocf/SysInfo.in b/agents/ocf/SysInfo.in index 5c2c7c7..d3a18f9 100755 --- a/agents/ocf/SysInfo.in +++ b/agents/ocf/SysInfo.in @@ -160,9 +160,11 @@ UpdateStat() { value="$*" printf "%s:\t%s\n" "$name" "$value" if [ "$__OCF_ACTION" = "start" ] ; then - "${HA_SBIN_DIR}/attrd_updater" ${OCF_RESKEY_delay} -S status -n $name -B "$value" + "${HA_SBIN_DIR}/attrd_updater" -d ${OCF_RESKEY_delay} -S status \ + -n $name -B "$value" else - "${HA_SBIN_DIR}/attrd_updater" ${OCF_RESKEY_delay} -S status -n $name -v "$value" + "${HA_SBIN_DIR}/attrd_updater" -d ${OCF_RESKEY_delay} -S status \ + -n $name -v "$value" fi } diff --git a/agents/ocf/o2cb.in b/agents/ocf/o2cb.in index f85d2f4..6db9d2b 100755 --- a/agents/ocf/o2cb.in +++ b/agents/ocf/o2cb.in @@ -366,7 +366,8 @@ meta_data() { 1.0 
-This Resource Agent controls the userspace daemon needed by OCFS2. +This Resource Agent controls the userspace daemon needed by OCFS2. This agent +is deprecated and will be removed in Pacemaker 3.0.0. OCFS2 daemon resource agent diff --git a/configure.ac b/configure.ac index 6bff02e..78357bf 100644 --- a/configure.ac +++ b/configure.ac @@ -1,7 +1,7 @@ dnl dnl autoconf for Pacemaker dnl -dnl Copyright 2009-2023 the Pacemaker project contributors +dnl Copyright 2009-2024 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl @@ -533,6 +533,9 @@ AC_ARG_WITH([coverage], yes_no_try "$with_coverage" "no" with_coverage=$? +AC_DEFINE_UNQUOTED([PCMK__WITH_COVERAGE], [$with_coverage], [Build with code coverage]) +AM_CONDITIONAL([BUILD_COVERAGE], [test $with_coverage -ne $DISABLED]) + AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], @@ -838,6 +841,11 @@ AC_DEFINE_UNQUOTED([CRM_SCHEMA_DIRECTORY], ["$CRM_SCHEMA_DIRECTORY"], [Location for the Pacemaker Relax-NG Schema]) AC_SUBST(CRM_SCHEMA_DIRECTORY) +PCMK__REMOTE_SCHEMA_DIR="${localstatedir}/lib/pacemaker/schemas" +AC_DEFINE_UNQUOTED([PCMK__REMOTE_SCHEMA_DIR], ["$PCMK__REMOTE_SCHEMA_DIR"], + [Location to store Relax-NG Schema files on remote nodes]) +AC_SUBST(PCMK__REMOTE_SCHEMA_DIR) + CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) @@ -1195,26 +1203,24 @@ AC_CHECK_DECLS([assert_float_equal], [], [], [[ #include ]]) -cc_temp_flags "$CFLAGS -Wl,--wrap=uname" -WRAPPABLE_UNAME="no" -AC_MSG_CHECKING([if uname() can be wrapped]) -AC_RUN_IFELSE([AC_LANG_SOURCE([[ -#include <sys/utsname.h> -int __wrap_uname(struct utsname *buf) { -return 100; -} -int main(int argc, char **argv) { -struct utsname x; -return uname(&x) == 100 ? 
0 : 1; -} -]])], - [ WRAPPABLE_UNAME="yes" ], [ WRAPPABLE_UNAME="no"]) -AC_MSG_RESULT([$WRAPPABLE_UNAME]) -AM_CONDITIONAL([WRAPPABLE_UNAME], [test x"$WRAPPABLE_UNAME" = x"yes"]) +dnl ======================================================================== +dnl Byte size +dnl ======================================================================== -cc_restore_flags +# Compile-time assert hack +# https://jonjagger.blogspot.com/2017/07/compile-time-assertions-in-c.html +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <limits.h>]], + [[ + switch (0) { + case 0: + case (CHAR_BIT == 8): + break; + } + ]])], + [], + [AC_MSG_FAILURE(m4_normalize([Pacemaker is not supported on + platforms where char is not 8 + bits]))]) dnl ======================================================================== dnl Structures @@ -2086,7 +2092,7 @@ CONFIG_FILES_EXEC([agents/ocf/ClusterMon], [tools/crm_standby], [tools/cibsecret], [tools/pcmk_simtimes], - [xml/version-diff.sh]) + [xml/rng-helper]) dnl Other files we output AC_CONFIG_FILES(Makefile \ @@ -2137,6 +2143,9 @@ AC_CONFIG_FILES(Makefile \ lib/Makefile \ lib/cib/Makefile \ lib/cluster/Makefile \ + lib/cluster/tests/Makefile \ + lib/cluster/tests/cluster/Makefile \ + lib/cluster/tests/cpg/Makefile \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/acl/Makefile \ @@ -2148,11 +2157,17 @@ AC_CONFIG_FILES(Makefile \ lib/common/tests/io/Makefile \ lib/common/tests/iso8601/Makefile \ lib/common/tests/lists/Makefile \ + lib/common/tests/nodes/Makefile \ lib/common/tests/nvpair/Makefile \ lib/common/tests/options/Makefile \ lib/common/tests/output/Makefile \ + lib/common/tests/probes/Makefile \ lib/common/tests/procfs/Makefile \ + lib/common/tests/resources/Makefile \ lib/common/tests/results/Makefile \ + lib/common/tests/rules/Makefile \ + lib/common/tests/scheduler/Makefile \ + lib/common/tests/schemas/Makefile \ lib/common/tests/scores/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ @@ -2163,6 +2178,9 @@ AC_CONFIG_FILES(Makefile \ lib/libpacemaker.pc \ lib/lrmd/Makefile \ lib/pacemaker/Makefile \ + lib/pacemaker/tests/Makefile \ + lib/pacemaker/tests/pcmk_resource/Makefile \ + lib/pacemaker/tests/pcmk_ticket/Makefile \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-cluster.pc \ @@ -2174,7 +2192,6 @@ AC_CONFIG_FILES(Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/native/Makefile \ - lib/pengine/tests/rules/Makefile \ lib/pengine/tests/status/Makefile \ lib/pengine/tests/unpack/Makefile \ lib/pengine/tests/utils/Makefile \ diff --git a/cts/README.md b/cts/README.md index cbf319a..595268d 100644 --- a/cts/README.md +++ b/cts/README.md @@ -103,6 +103,15 @@ CTS includes: *must* match the hosts' names as returned by `uname -n`; they do not have to match the machines' fully qualified domain names. +* Optionally, configure the exerciser as a log aggregator, using something like + `rsyslog` log forwarding (see the sketch after this list). If aggregation is + detected, the exerciser will look for new messages locally instead of + requesting them repeatedly from cluster nodes. + * Currently, `/var/log/messages` on the exerciser is the only supported log + destination. Further, if it's specified explicitly on the command line as + the log file, then CTS lab will not check for aggregation. + * CTS lab does not currently detect systemd journal log aggregation. 
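+
+  As a minimal, illustrative sketch only (not shipped with CTS): the exerciser
+  hostname `cts-exerciser` and port 514 below are assumptions to adapt to your
+  environment, as is forwarding all facilities rather than a filtered subset.
+
+      # /etc/rsyslog.d/cts-forward.conf on each cluster node:
+      # forward all messages to the exerciser (@@ = TCP, @ = UDP)
+      *.* @@cts-exerciser:514
+
+      # /etc/rsyslog.d/cts-aggregate.conf on the exerciser:
+      # accept forwarded messages so they reach /var/log/messages
+      module(load="imtcp")
+      input(type="imtcp" port="514")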
+ ### Run diff --git a/cts/cli/regression.access_render.exp b/cts/cli/regression.access_render.exp index 37f093d..d818b65 100644 --- a/cts/cli/regression.access_render.exp +++ b/cts/cli/regression.access_render.exp @@ -1,22 +1,22 @@ Created new pacemaker configuration -Setting up shadow instance -A new shadow instance was created. To begin using it paste the following into your shell: - CIB_shadow=cts-cli ; export CIB_shadow +A new shadow instance was created. To begin using it, enter the following into your shell: + export CIB_shadow=cts-cli =#=#=#= Begin test: Configure some ACLs =#=#=#= =#=#=#= Current cib after: Configure some ACLs =#=#=#= - + - + + - + @@ -26,7 +26,7 @@ A new shadow instance was created. To begin using it paste the following into y * Passed: cibadmin - Configure some ACLs =#=#=#= Begin test: Enable ACLs =#=#=#= =#=#=#= Current cib after: Enable ACLs =#=#=#= - + @@ -37,12 +37,13 @@ A new shadow instance was created. To begin using it paste the following into y - + + - + @@ -52,7 +53,7 @@ A new shadow instance was created. To begin using it paste the following into y * Passed: crm_attribute - Enable ACLs =#=#=#= Begin test: An instance of ACLs render (into color) =#=#=#= -\x1b[34m +\x1b[34m \x1b[34m \x1b[34m \x1b[34m @@ -60,15 +61,16 @@ A new shadow instance was created. To begin using it paste the following into y \x1b[34m \x1b[34m \x1b[34m - \x1b[34m + \x1b[32m \x1b[34m \x1b[31m - \x1b[31m + \x1b[31m \x1b[31m + \x1b[31m \x1b[31m \x1b[31m \x1b[31m - \x1b[31m + \x1b[31m \x1b[31m \x1b[31m \x1b[34m @@ -78,7 +80,7 @@ A new shadow instance was created. To begin using it paste the following into y * Passed: cibadmin - An instance of ACLs render (into color) =#=#=#= Begin test: An instance of ACLs render (into namespacing) =#=#=#= - + @@ -86,15 +88,16 @@ A new shadow instance was created. To begin using it paste the following into y - + - + + - + @@ -105,7 +108,7 @@ A new shadow instance was created. To begin using it paste the following into y =#=#=#= Begin test: An instance of ACLs render (into text) =#=#=#= vvv---[ READABLE ]---vvv - + @@ -113,17 +116,20 @@ vvv---[ READABLE ]---vvv + + vvv---[ WRITABLE ]---vvv vvv---[ ~DENIED~ ]---vvv - + + - + diff --git a/cts/cli/regression.acls.exp b/cts/cli/regression.acls.exp index c0b0c4f..1822328 100644 --- a/cts/cli/regression.acls.exp +++ b/cts/cli/regression.acls.exp @@ -30,8 +30,8 @@ A new shadow instance was created. To begin using it, enter the following into y - - + + @@ -87,8 +87,8 @@ A new shadow instance was created. To begin using it, enter the following into y - - + + @@ -145,8 +145,8 @@ A new shadow instance was created. To begin using it, enter the following into y - - + + @@ -203,8 +203,8 @@ A new shadow instance was created. To begin using it, enter the following into y - - + + @@ -264,8 +264,8 @@ A new shadow instance was created. To begin using it, enter the following into y - - + + @@ -328,8 +328,8 @@ A new shadow instance was created. 
To begin using it, enter the following into y - - + + @@ -372,14 +372,9 @@ crm_attribute: Error performing operation: Permission denied =#=#=#= End test: unknownguy: Set stonith-enabled - Insufficient privileges (4) =#=#=#= * Passed: crm_attribute - unknownguy: Set stonith-enabled =#=#=#= Begin test: unknownguy: Create a resource =#=#=#= -pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id] -pcmk__apply_creation_acl trace: Creation of scaffolding with id="" is implicitly allowed +pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id='dummy'] +pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy" Call failed: Permission denied - - - - - =#=#=#= End test: unknownguy: Create a resource - Insufficient privileges (4) =#=#=#= * Passed: cibadmin - unknownguy: Create a resource =#=#=#= Begin test: l33t-haxor: Query configuration =#=#=#= @@ -433,8 +428,8 @@ Call failed: Permission denied - - + + @@ -506,8 +501,8 @@ pcmk__apply_creation_acl trace: ACLs allow creation of with id="cib-bo - - + + @@ -577,8 +572,8 @@ Call failed: Permission denied - - + + @@ -643,8 +638,8 @@ Call failed: Permission denied - - + + @@ -711,8 +706,8 @@ Call failed: Permission denied - - + + @@ -801,8 +796,8 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut - - + + @@ -877,8 +872,8 @@ Stopped - - + + @@ -951,8 +946,8 @@ Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role - - + + @@ -1028,8 +1023,8 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut - - + + @@ -1154,8 +1149,8 @@ Call failed: Permission denied - - + + @@ -1229,8 +1224,8 @@ Call failed: Permission denied - - + + @@ -1303,8 +1298,8 @@ Call failed: Permission denied - - + + @@ -1377,8 +1372,8 @@ Call failed: Permission denied - - + + @@ -1451,8 +1446,8 @@ Call failed: Permission denied - - + + @@ -1522,8 +1517,8 @@ Call failed: Permission denied - - + + @@ -1589,8 +1584,8 @@ Call failed: Permission denied - - + + @@ -1656,8 +1651,8 @@ Call failed: Permission denied - - + + @@ -1723,8 +1718,8 @@ Call failed: Permission denied - - + + @@ -1790,8 +1785,8 @@ Call failed: Permission denied - - + + @@ -1857,8 +1852,8 @@ Call failed: Permission denied - - + + @@ -1924,8 +1919,8 @@ Call failed: Permission denied - - + + @@ -1991,8 +1986,8 @@ Call failed: Permission denied - - + + @@ -2058,8 +2053,8 @@ Call failed: Permission denied - - + + @@ -2127,8 +2122,8 @@ Call failed: Permission denied - - + + @@ -2196,8 +2191,8 @@ Call failed: Permission denied - - + + @@ -2273,8 +2268,8 @@ Call failed: Permission denied - - + + @@ -2323,14 +2318,9 @@ crm_attribute: Error performing operation: Permission denied =#=#=#= End test: unknownguy: Set stonith-enabled - Insufficient privileges (4) =#=#=#= * Passed: crm_attribute - unknownguy: Set stonith-enabled =#=#=#= Begin test: unknownguy: Create a resource =#=#=#= -pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id] -pcmk__apply_creation_acl trace: Creation of scaffolding with id="" is implicitly allowed +pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id='dummy'] +pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy" Call failed: Permission denied - - - - - =#=#=#= End test: unknownguy: Create a resource - 
Insufficient privileges (4) =#=#=#= * Passed: cibadmin - unknownguy: Create a resource =#=#=#= Begin test: l33t-haxor: Query configuration =#=#=#= @@ -2388,8 +2378,8 @@ Call failed: Permission denied - - + + @@ -2469,8 +2459,8 @@ crm_attribute: Error performing operation: Permission denied - - + + @@ -2549,8 +2539,8 @@ Call failed: Permission denied - - + + @@ -2624,8 +2614,8 @@ Call failed: Permission denied - - + + @@ -2701,8 +2691,8 @@ Call failed: Permission denied - - + + @@ -2800,8 +2790,8 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut - - + + @@ -2885,8 +2875,8 @@ Stopped - - + + @@ -2968,8 +2958,8 @@ Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role - - + + @@ -3054,8 +3044,8 @@ Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attribut - - + + @@ -3189,8 +3179,8 @@ Call failed: Permission denied - - + + @@ -3273,8 +3263,8 @@ Call failed: Permission denied - - + + @@ -3356,8 +3346,8 @@ Call failed: Permission denied - - + + @@ -3439,8 +3429,8 @@ Call failed: Permission denied - - + + @@ -3522,8 +3512,8 @@ Call failed: Permission denied - - + + @@ -3602,8 +3592,8 @@ Call failed: Permission denied - - + + @@ -3678,8 +3668,8 @@ Call failed: Permission denied - - + + @@ -3754,8 +3744,8 @@ Call failed: Permission denied - - + + @@ -3830,8 +3820,8 @@ Call failed: Permission denied - - + + @@ -3906,8 +3896,8 @@ Call failed: Permission denied - - + + @@ -3982,8 +3972,8 @@ Call failed: Permission denied - - + + @@ -4058,8 +4048,8 @@ Call failed: Permission denied - - + + @@ -4134,8 +4124,8 @@ Call failed: Permission denied - - + + @@ -4210,8 +4200,8 @@ Call failed: Permission denied - - + + @@ -4288,8 +4278,8 @@ Call failed: Permission denied - - + + @@ -4366,8 +4356,8 @@ Call failed: Permission denied - - + + diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp index b34fba8..d530c4a 100644 --- a/cts/cli/regression.daemons.exp +++ b/cts/cli/regression.daemons.exp @@ -1,18 +1,31 @@ =#=#=#= Begin test: Get CIB manager metadata =#=#=#= - - 1.1 - Cluster options used by Pacemaker's Cluster Information Base manager - Cluster Information Base manager options + + 1.1 + + + Cluster options used by Pacemaker's Cluster Information Base manager + + + Cluster Information Base manager options + - Enable Access Control Lists (ACLs) for the CIB - Enable Access Control Lists (ACLs) for the CIB + + Enable Access Control Lists (ACLs) for the CIB + + + Enable Access Control Lists (ACLs) for the CIB + - Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). - Maximum IPC message backlog before disconnecting a cluster daemon + + Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). + + + Maximum IPC message backlog before disconnecting a cluster daemon + @@ -20,247 +33,399 @@ =#=#=#= End test: Get CIB manager metadata - OK (0) =#=#=#= * Passed: pacemaker-based - Get CIB manager metadata =#=#=#= Begin test: Get controller metadata =#=#=#= - - 1.1 - Cluster options used by Pacemaker's controller - Pacemaker controller options + + 1.1 + + + Cluster options used by Pacemaker's controller + + + Pacemaker controller options + - Includes a hash which identifies the exact changeset the code was built from. Used for diagnostic purposes. 
- Pacemaker version on cluster node elected Designated Controller (DC) - + + Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes. + + + Pacemaker version on cluster node elected Designated Controller (DC) + + - Used for informational and diagnostic purposes. - The messaging stack on which Pacemaker is currently running - + + Used for informational and diagnostic purposes. + + + The messaging layer on which Pacemaker is currently running + + - This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. - An arbitrary name for the cluster + + This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. + + + An arbitrary name for the cluster + - The optimal value will depend on the speed and load of your network and the type of switches used. - How long to wait for a response from other nodes during start-up + + The optimal value will depend on the speed and load of your network and the type of switches used. + + + How long to wait for a response from other nodes during start-up + - Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure timeouts and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. Allowed values: Zero disables polling, while positive values are an interval in seconds(unless other units are specified, for example "5min") - Polling interval to recheck cluster state and evaluate rules with date specifications + + Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min"). + + + Polling interval to recheck cluster state and evaluate rules with date specifications + - - The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit - Maximum amount of system load that should be used by cluster nodes - - - - Maximum number of jobs that can be scheduled per node (defaults to 2x cores) - Maximum number of jobs that can be scheduled per node (defaults to 2x cores) - - - A cluster node may receive notification of its own fencing if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Allowed values are "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. - How a cluster node should react if notified of its own fencing - + + A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. 
Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. Allowed values: stop, panic + + + How a cluster node should react if notified of its own fencing + + + - Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. - *** Advanced Use Only *** + + Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + *** Advanced Use Only *** + - Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. - *** Advanced Use Only *** + + Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + *** Advanced Use Only *** + - If you need to adjust this value, it probably indicates the presence of a bug. - *** Advanced Use Only *** + + If you need to adjust this value, it probably indicates the presence of a bug. + + + *** Advanced Use Only *** + - If you need to adjust this value, it probably indicates the presence of a bug. - *** Advanced Use Only *** + + If you need to adjust this value, it probably indicates the presence of a bug. + + + *** Advanced Use Only *** + - Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. - *** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions + + Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. + + + *** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions + - If this is set to a positive value, lost nodes are assumed to self-fence using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. - How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + + If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. 
If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. + + + How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + - How many times fencing can fail before it will no longer be immediately re-attempted on a target - How many times fencing can fail before it will no longer be immediately re-attempted on a target + + How many times fencing can fail before it will no longer be immediately re-attempted on a target + + + How many times fencing can fail before it will no longer be immediately re-attempted on a target + - - What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, suicide - What to do when the cluster does not have quorum - - - - - When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. - Whether to lock resources to a cleanly shut down node - - - - If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. - Do not lock resources to a cleanly shut down node longer than this - + + + The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit + + + Maximum amount of system load that should be used by cluster nodes + + - - Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. - How long to wait for a node that has joined the cluster to join the controller process group - + + + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + + + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + + =#=#=#= End test: Get controller metadata - OK (0) =#=#=#= * Passed: pacemaker-controld - Get controller metadata =#=#=#= Begin test: Get fencer metadata =#=#=#= - - 1.1 - Instance attributes available for all "stonith"-class resources and used by Pacemaker's fence daemon, formerly known as stonithd - Instance attributes available for all "stonith"-class resources + + 1.1 + + + Instance attributes available for all "stonith"-class resources and used by Pacemaker's fence daemon, formerly known as stonithd + + + Instance attributes available for all "stonith"-class resources + - some devices do not support the standard 'port' parameter or may provide additional ones. 
Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of none can be used to tell the cluster not to supply any additional parameters. - Advanced use only: An alternate parameter to supply instead of 'port' + + Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters. + + + *** Advanced Use Only *** An alternate parameter to supply instead of 'port' + - Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2 - A mapping of host names to ports numbers for devices that do not support host names. - + + For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. + + + A mapping of node names to port numbers for devices that do not support node names. + + - A list of machines controlled by this device (Optional unless pcmk_host_list=static-list) - Eg. node1,node2,node3 - + + Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. + + + Nodes targeted by this device + + - Allowed values: dynamic-list (query the device via the 'list' command), static-list (check the pcmk_host_list attribute), status (query the device via the 'status' command), none (assume every device can fence every machine) - How to determine which machines are controlled by the device. - + + Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" Allowed values: dynamic-list, static-list, status, none + + + How to determine which nodes can be targeted by the device + + + - Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. - Enable a base delay for fencing actions and specify base delay value. + + Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. + + + Enable a delay of no more than the time specified before executing fencing actions. + - This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value.This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value per target. - Enable a base delay for fencing actions and specify base delay value. 
+ + This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. + + + Enable a base delay for fencing actions and specify base delay value. + - Cluster property concurrent-fencing=true needs to be configured first.Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited. - The maximum number of actions can be performed in parallel on this device + + Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel. + + + The maximum number of actions can be performed in parallel on this device + - Some devices do not support the standard commands or may provide additional ones.\nUse this to specify an alternate, device-specific, command that implements the 'reboot' action. - Advanced use only: An alternate command to run instead of 'reboot' + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action. + + + *** Advanced Use Only *** An alternate command to run instead of 'reboot' + - Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'reboot' actions. - Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up. - Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'reboot' command within the timeout period + - Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'off' action. - Advanced use only: An alternate command to run instead of 'off' + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. 
+ + + *** Advanced Use Only *** An alternate command to run instead of 'off' + - Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'off' actions. - Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'off' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up. - Advanced use only: The maximum number of times to retry the 'off' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'off' command within the timeout period + - Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'on' action. - Advanced use only: An alternate command to run instead of 'on' + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. + + + *** Advanced Use Only *** An alternate command to run instead of 'on' + - Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'on' actions. - Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'on' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up. - Advanced use only: The maximum number of times to retry the 'on' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'on' command within the timeout period + - Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'list' action. - Advanced use only: An alternate command to run instead of 'list' + + Some devices do not support the standard commands or may provide additional ones. 
Use this to specify an alternate, device-specific, command that implements the 'list' action. + + + *** Advanced Use Only *** An alternate command to run instead of 'list' + - Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'list' actions. - Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'list' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up. - Advanced use only: The maximum number of times to retry the 'list' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'list' command within the timeout period + - Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'monitor' action. - Advanced use only: An alternate command to run instead of 'monitor' + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action. + + + *** Advanced Use Only *** An alternate command to run instead of 'monitor' + - Some devices need much more/less time to complete than normal.\nUse this to specify an alternate, device-specific, timeout for 'monitor' actions. - Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up. - Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'monitor' command within the timeout period + - Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'status' action. 
- Advanced use only: An alternate command to run instead of 'status' + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action. + + + *** Advanced Use Only *** An alternate command to run instead of 'status' + - Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'status' actions. - Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. + + + *** Advanced Use Only *** Specify an alternate timeout to use for 'status' actions instead of stonith-timeout + - Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up. - Advanced use only: The maximum number of times to retry the 'status' command within the timeout period + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. + + + *** Advanced Use Only *** The maximum number of times to try the 'status' command within the timeout period + @@ -268,186 +433,315 @@ =#=#=#= End test: Get fencer metadata - OK (0) =#=#=#= * Passed: pacemaker-fenced - Get fencer metadata =#=#=#= Begin test: Get scheduler metadata =#=#=#= - - 1.1 - Cluster options used by Pacemaker's scheduler - Pacemaker scheduler options + + 1.1 + + + Cluster options used by Pacemaker's scheduler + + + Pacemaker scheduler options + - What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, suicide - What to do when the cluster does not have quorum + + What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, suicide + + + What to do when the cluster does not have quorum + - + + + When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. + + + Whether to lock resources to a cleanly shut down node + + + + + + If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. 
+ + + Do not lock resources to a cleanly shut down node longer than this + + + - Whether resources can run on any node by default - Whether resources can run on any node by default + + Whether resources can run on any node by default + + + Whether resources can run on any node by default + - Whether the cluster should refrain from monitoring, starting, and stopping resources - Whether the cluster should refrain from monitoring, starting, and stopping resources + + Whether the cluster should refrain from monitoring, starting, and stopping resources + + + Whether the cluster should refrain from monitoring, starting, and stopping resources + - When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. - Whether a start failure should prevent a resource from being recovered on the same node + + When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. + + + Whether a start failure should prevent a resource from being recovered on the same node + - Whether the cluster should check for active resources during start-up - Whether the cluster should check for active resources during start-up + + Whether the cluster should check for active resources during start-up + + + Whether the cluster should check for active resources during start-up + - - When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. - Whether to lock resources to a cleanly shut down node - - - - If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. - Do not lock resources to a cleanly shut down node longer than this - - - If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability. - *** Advanced Use Only *** Whether nodes may be fenced as part of recovery + + If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability. 
+ + + *** Advanced Use Only *** Whether nodes may be fenced as part of recovery + - Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") Allowed values: reboot, off, poweroff - Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") Allowed values: reboot, off, poweroff + + + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + - - This value is not used by Pacemaker, but is kept for backward compatibility, and certain legacy fence agents might use it. - *** Advanced Use Only *** Unused by Pacemaker + + How long to wait for on, off, and reboot fence actions to complete by default + + + How long to wait for on, off, and reboot fence actions to complete by default + - This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. - Whether watchdog integration is enabled + + This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. + + + Whether watchdog integration is enabled + - Allow performing fencing operations in parallel - Allow performing fencing operations in parallel + + Allow performing fencing operations in parallel + + + Allow performing fencing operations in parallel + - Setting this to false may lead to a "split-brain" situation,potentially leading to data loss and/or service unavailability. - *** Advanced Use Only *** Whether to fence unseen nodes at start-up + + Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability. + + + *** Advanced Use Only *** Whether to fence unseen nodes at start-up + - Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. - Apply fencing delay targeting the lost nodes with the highest total resource priority + + Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. 
A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. + + + Apply fencing delay targeting the lost nodes with the highest total resource priority + - Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. - How long to wait for a node that has joined the cluster to join the controller process group + + Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + + + How long to wait for a node that has joined the cluster to join the controller process group + - The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. - Maximum time for node-to-node communication + + The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. + + + Maximum time for node-to-node communication + - The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. - Maximum number of jobs that the cluster may execute in parallel across all nodes + + The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. 
+ + + Maximum number of jobs that the cluster may execute in parallel across all nodes + - The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) - The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + + + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + - Whether the cluster should stop all active resources - Whether the cluster should stop all active resources + + Whether the cluster should stop all active resources + + + Whether the cluster should stop all active resources + - Whether to stop resources that were removed from the configuration - Whether to stop resources that were removed from the configuration + + Whether to stop resources that were removed from the configuration + + + Whether to stop resources that were removed from the configuration + - Whether to cancel recurring actions removed from the configuration - Whether to cancel recurring actions removed from the configuration + + Whether to cancel recurring actions removed from the configuration + + + Whether to cancel recurring actions removed from the configuration + - Values other than default are poorly tested and potentially dangerous. This option will be removed in a future release. - *** Deprecated *** Whether to remove stopped resources from the executor + + Values other than default are poorly tested and potentially dangerous. + + + *** Deprecated *** Whether to remove stopped resources from the executor + - Zero to disable, -1 to store unlimited. - The number of scheduler inputs resulting in errors to save + + Zero to disable, -1 to store unlimited. + + + The number of scheduler inputs resulting in errors to save + - Zero to disable, -1 to store unlimited. - The number of scheduler inputs resulting in warnings to save + + Zero to disable, -1 to store unlimited. + + + The number of scheduler inputs resulting in warnings to save + - Zero to disable, -1 to store unlimited. - The number of scheduler inputs without errors or warnings to save + + Zero to disable, -1 to store unlimited. + + + The number of scheduler inputs without errors or warnings to save + - Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". Allowed values: none, migrate-on-red, only-green, progressive, custom - How cluster should react to node health attributes + + Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". Allowed values: none, migrate-on-red, only-green, progressive, custom + + + How cluster should react to node health attributes + - - Only used when "node-health-strategy" is set to "progressive". - Base health score assigned to a node + + Only used when "node-health-strategy" is set to "progressive". + + + Base health score assigned to a node + - Only used when "node-health-strategy" is set to "custom" or "progressive". - The score to use for a node health attribute whose value is "green" + + Only used when "node-health-strategy" is set to "custom" or "progressive". + + + The score to use for a node health attribute whose value is "green" + - Only used when "node-health-strategy" is set to "custom" or "progressive". 
- The score to use for a node health attribute whose value is "yellow" + + Only used when "node-health-strategy" is set to "custom" or "progressive". + + + The score to use for a node health attribute whose value is "yellow" + - Only used when "node-health-strategy" is set to "custom" or "progressive". - The score to use for a node health attribute whose value is "red" + + Only used when "node-health-strategy" is set to "custom" or "progressive". + + + The score to use for a node health attribute whose value is "red" + - How the cluster should allocate resources to nodes Allowed values: default, utilization, minimal, balanced - How the cluster should allocate resources to nodes + + How the cluster should allocate resources to nodes Allowed values: default, utilization, minimal, balanced + + + How the cluster should allocate resources to nodes + - diff --git a/cts/cli/regression.rules.exp b/cts/cli/regression.rules.exp index cdfb5d1..a439773 100644 --- a/cts/cli/regression.rules.exp +++ b/cts/cli/regression.rules.exp @@ -37,7 +37,6 @@ log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expec log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ crm_rule: Couldn't parse input string: invalidxml - =#=#=#= End test: crm_rule given invalid input XML - Invalid data given (65) =#=#=#= * Passed: crm_rule - crm_rule given invalid input XML =#=#=#= Begin test: crm_rule given invalid input XML (XML) =#=#=#= @@ -50,8 +49,7 @@ log_xmllib_err error: XML Error: ^ - crm_rule: Couldn't parse input string: invalidxml - + crm_rule: Couldn't parse input string: invalidxml @@ -65,7 +63,6 @@ log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expec log_xmllib_err error: XML Error: invalidxml log_xmllib_err error: XML Error: ^ crm_rule: Couldn't parse input from STDIN - =#=#=#= End test: crm_rule given invalid input XML on stdin - Invalid data given (65) =#=#=#= * Passed: echo - crm_rule given invalid input XML on stdin =#=#=#= Begin test: crm_rule given invalid input XML on stdin (XML) =#=#=#= @@ -78,8 +75,7 @@ log_xmllib_err error: XML Error: ^ - crm_rule: Couldn't parse input from STDIN - + crm_rule: Couldn't parse input from STDIN diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index accf781..6eef178 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -23,6 +23,910 @@ A new shadow instance was created. To begin using it, enter the following into y =#=#=#= End test: Validate CIB - OK (0) =#=#=#= * Passed: cibadmin - Validate CIB +=#=#=#= Begin test: List all available options (invalid type) =#=#=#= +crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster +=#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#= +* Passed: crm_attribute - List all available options (invalid type) +=#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#= + + + + crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster + + + +=#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#= +* Passed: crm_attribute - List all available options (invalid type) (XML) +=#=#=#= Begin test: List non-advanced cluster options =#=#=#= +Pacemaker cluster options + +Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section. 
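(The listing that follows is produced by the new `crm_attribute --list-options` mode these tests exercise. A minimal sketch of the invocations behind the test names, assuming a 2.1.8+ build; the `--all` and `--output-as=xml` flags are inferred from the test titles, not quoted from the tool's help:)

    # Show non-advanced cluster options as plain text
    crm_attribute --list-options=cluster
    # Also include advanced and deprecated options (flag assumed from
    # the "List all available cluster options" test name)
    crm_attribute --list-options=cluster --all
    # Emit the same catalog as OCF-style XML metadata
    crm_attribute --list-options=cluster --output-as=xml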
+ + * dc-version: Pacemaker version on cluster node elected Designated Controller (DC) + * Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes. + * Possible values (generated by Pacemaker): version (no default) + + * cluster-infrastructure: The messaging layer on which Pacemaker is currently running + * Used for informational and diagnostic purposes. + * Possible values (generated by Pacemaker): string (no default) + + * cluster-name: An arbitrary name for the cluster + * This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. + * Possible values: string (no default) + + * dc-deadtime: How long to wait for a response from other nodes during start-up + * The optimal value will depend on the speed and load of your network and the type of switches used. + * Possible values: duration (default: ) + + * cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications + * Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min"). + * Possible values: duration (default: ) + + * fence-reaction: How a cluster node should react if notified of its own fencing + * A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. + * Possible values: "stop" (default), "panic" + + * no-quorum-policy: What to do when the cluster does not have quorum + * Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide" + + * shutdown-lock: Whether to lock resources to a cleanly shut down node + * When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. + * Possible values: boolean (default: ) + + * shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this + * If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. 
+ * Possible values: duration (default: ) + + * enable-acl: Enable Access Control Lists (ACLs) for the CIB + * Possible values: boolean (default: ) + + * symmetric-cluster: Whether resources can run on any node by default + * Possible values: boolean (default: ) + + * maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources + * Possible values: boolean (default: ) + + * start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node + * When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. + * Possible values: boolean (default: ) + + * enable-startup-probes: Whether the cluster should check for active resources during start-up + * Possible values: boolean (default: ) + + * stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + * Possible values: "reboot" (default), "off", "poweroff" + + * stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default + * Possible values: duration (default: ) + + * have-watchdog: Whether watchdog integration is enabled + * This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. + * Possible values (generated by Pacemaker): boolean (default: ) + + * stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + * If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. 
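(An aside on the stonith-watchdog-timeout warning just above: a worked sketch of a diskless-SBD setup consistent with it. The SBD_WATCHDOG_TIMEOUT value of 10 is illustrative only:)

    # Assumption: /etc/sysconfig/sbd sets SBD_WATCHDOG_TIMEOUT=10 on every node.
    # Per the warning above, the cluster value must be larger than that on all
    # nodes; twice it is the conventional safe choice. A negative value instead
    # asks Pacemaker to derive 2x SBD_WATCHDOG_TIMEOUT itself.
    crm_attribute --type crm_config --name stonith-watchdog-timeout --update 20s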
+ * Possible values: timeout (default: ) + + * stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target + * Possible values: score (default: ) + + * concurrent-fencing: Allow performing fencing operations in parallel + * Possible values: boolean (default: ) + + * priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority + * Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. + * Possible values: duration (default: ) + + * node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group + * Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + * Possible values: duration (default: ) + + * cluster-delay: Maximum time for node-to-node communication + * The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. + * Possible values: duration (default: ) + + * load-threshold: Maximum amount of system load that should be used by cluster nodes + * The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit + * Possible values: percentage (default: ) + + * node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + * Possible values: integer (default: ) + + * batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes + * The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. + * Possible values: integer (default: ) + + * migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + * Possible values: integer (default: ) + + * cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon + * Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). 
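(Worked example of the priority-fencing-delay sizing rule above, with an assumed fence-device delay:)

    # Assumption: fence devices are configured with pcmk_delay_max=5s.
    # The guidance above asks for "significantly greater than, safely twice,
    # the maximum pcmk_delay_base/max", so 10s is the floor used here.
    crm_attribute --type crm_config --name priority-fencing-delay --update 10s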
+ * Possible values: nonnegative_integer (default: ) + + * stop-all-resources: Whether the cluster should stop all active resources + * Possible values: boolean (default: ) + + * stop-orphan-resources: Whether to stop resources that were removed from the configuration + * Possible values: boolean (default: ) + + * stop-orphan-actions: Whether to cancel recurring actions removed from the configuration + * Possible values: boolean (default: ) + + * pe-error-series-max: The number of scheduler inputs resulting in errors to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * pe-warn-series-max: The number of scheduler inputs resulting in warnings to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * pe-input-series-max: The number of scheduler inputs without errors or warnings to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * node-health-strategy: How cluster should react to node health attributes + * Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". + * Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom" + + * node-health-base: Base health score assigned to a node + * Only used when "node-health-strategy" is set to "progressive". + * Possible values: score (default: ) + + * node-health-green: The score to use for a node health attribute whose value is "green" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * node-health-yellow: The score to use for a node health attribute whose value is "yellow" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * node-health-red: The score to use for a node health attribute whose value is "red" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * placement-strategy: How the cluster should allocate resources to nodes + * Possible values: "default" (default), "utilization", "minimal", "balanced" +=#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#= +* Passed: crm_attribute - List non-advanced cluster options +=#=#=#= Begin test: List non-advanced cluster options (XML) (shows all) =#=#=#= + + + 1.1 + Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section. + Pacemaker cluster options + + + Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes. + Pacemaker version on cluster node elected Designated Controller (DC) + + + + Used for informational and diagnostic purposes. + The messaging layer on which Pacemaker is currently running + + + + This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. + An arbitrary name for the cluster + + + + The optimal value will depend on the speed and load of your network and the type of switches used. 
+ How long to wait for a response from other nodes during start-up + + + + Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min"). + Polling interval to recheck cluster state and evaluate rules with date specifications + + + + A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. + How a cluster node should react if notified of its own fencing + + + + + Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + + Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + + If you need to adjust this value, it probably indicates the presence of a bug. + If you need to adjust this value, it probably indicates the presence of a bug. + + + + If you need to adjust this value, it probably indicates the presence of a bug. + If you need to adjust this value, it probably indicates the presence of a bug. + + + + Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. + Enabling this option will slow down cluster recovery under all conditions + + + + What to do when the cluster does not have quorum + What to do when the cluster does not have quorum + + + + + When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. + Whether to lock resources to a cleanly shut down node + + + + If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. 
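(The shutdown-lock pair above maps to two ordinary cluster properties; a minimal sketch of enabling them, with an illustrative 30-minute cap:)

    # Keep resources pinned to a cleanly shut-down node, but give up the lock
    # after 30 minutes if the node has not rejoined (value illustrative).
    crm_attribute --type crm_config --name shutdown-lock --update true
    crm_attribute --type crm_config --name shutdown-lock-limit --update 30min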
+ Do not lock resources to a cleanly shut down node longer than this + + + + Enable Access Control Lists (ACLs) for the CIB + Enable Access Control Lists (ACLs) for the CIB + + + + Whether resources can run on any node by default + Whether resources can run on any node by default + + + + Whether the cluster should refrain from monitoring, starting, and stopping resources + Whether the cluster should refrain from monitoring, starting, and stopping resources + + + + When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. + Whether a start failure should prevent a resource from being recovered on the same node + + + + Whether the cluster should check for active resources during start-up + Whether the cluster should check for active resources during start-up + + + + If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability. + Whether nodes may be fenced as part of recovery + + + + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + + + + + How long to wait for on, off, and reboot fence actions to complete by default + How long to wait for on, off, and reboot fence actions to complete by default + + + + This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. + Whether watchdog integration is enabled + + + + If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. + How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + + + + How many times fencing can fail before it will no longer be immediately re-attempted on a target + How many times fencing can fail before it will no longer be immediately re-attempted on a target + + + + Allow performing fencing operations in parallel + Allow performing fencing operations in parallel + + + + Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability. 
+ Whether to fence unseen nodes at start-up + + + + Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. + Apply fencing delay targeting the lost nodes with the highest total resource priority + + + + Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + How long to wait for a node that has joined the cluster to join the controller process group + + + + The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. + Maximum time for node-to-node communication + + + + The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit + Maximum amount of system load that should be used by cluster nodes + + + + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + + + + The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. + Maximum number of jobs that the cluster may execute in parallel across all nodes + + + + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + + + + Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). + Maximum IPC message backlog before disconnecting a cluster daemon + + + + Whether the cluster should stop all active resources + Whether the cluster should stop all active resources + + + + Whether to stop resources that were removed from the configuration + Whether to stop resources that were removed from the configuration + + + + Whether to cancel recurring actions removed from the configuration + Whether to cancel recurring actions removed from the configuration + + + + + Values other than default are poorly tested and potentially dangerous. + Whether to remove stopped resources from the executor + + + + Zero to disable, -1 to store unlimited. + The number of scheduler inputs resulting in errors to save + + + + Zero to disable, -1 to store unlimited. + The number of scheduler inputs resulting in warnings to save + + + + Zero to disable, -1 to store unlimited. 
+ The number of scheduler inputs without errors or warnings to save + + + + Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". + How cluster should react to node health attributes + + + + + Only used when "node-health-strategy" is set to "progressive". + Base health score assigned to a node + + + + Only used when "node-health-strategy" is set to "custom" or "progressive". + The score to use for a node health attribute whose value is "green" + + + + Only used when "node-health-strategy" is set to "custom" or "progressive". + The score to use for a node health attribute whose value is "yellow" + + + + Only used when "node-health-strategy" is set to "custom" or "progressive". + The score to use for a node health attribute whose value is "red" + + + + How the cluster should allocate resources to nodes + How the cluster should allocate resources to nodes + + + + + + + +=#=#=#= End test: List non-advanced cluster options (XML) (shows all) - OK (0) =#=#=#= +* Passed: crm_attribute - List non-advanced cluster options (XML) (shows all) +=#=#=#= Begin test: List all available cluster options =#=#=#= +Pacemaker cluster options + +Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section. + + * dc-version: Pacemaker version on cluster node elected Designated Controller (DC) + * Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes. + * Possible values (generated by Pacemaker): version (no default) + + * cluster-infrastructure: The messaging layer on which Pacemaker is currently running + * Used for informational and diagnostic purposes. + * Possible values (generated by Pacemaker): string (no default) + + * cluster-name: An arbitrary name for the cluster + * This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. + * Possible values: string (no default) + + * dc-deadtime: How long to wait for a response from other nodes during start-up + * The optimal value will depend on the speed and load of your network and the type of switches used. + * Possible values: duration (default: ) + + * cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications + * Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min"). + * Possible values: duration (default: ) + + * fence-reaction: How a cluster node should react if notified of its own fencing + * A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. 
+ * Possible values: "stop" (default), "panic" + + * no-quorum-policy: What to do when the cluster does not have quorum + * Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide" + + * shutdown-lock: Whether to lock resources to a cleanly shut down node + * When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. + * Possible values: boolean (default: ) + + * shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this + * If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. + * Possible values: duration (default: ) + + * enable-acl: Enable Access Control Lists (ACLs) for the CIB + * Possible values: boolean (default: ) + + * symmetric-cluster: Whether resources can run on any node by default + * Possible values: boolean (default: ) + + * maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources + * Possible values: boolean (default: ) + + * start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node + * When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. + * Possible values: boolean (default: ) + + * enable-startup-probes: Whether the cluster should check for active resources during start-up + * Possible values: boolean (default: ) + + * stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + * Possible values: "reboot" (default), "off", "poweroff" + + * stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default + * Possible values: duration (default: ) + + * have-watchdog: Whether watchdog integration is enabled + * This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. + * Possible values (generated by Pacemaker): boolean (default: ) + + * stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + * If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. 
WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. + * Possible values: timeout (default: ) + + * stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target + * Possible values: score (default: ) + + * concurrent-fencing: Allow performing fencing operations in parallel + * Possible values: boolean (default: ) + + * priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority + * Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. + * Possible values: duration (default: ) + + * node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group + * Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. + * Possible values: duration (default: ) + + * cluster-delay: Maximum time for node-to-node communication + * The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. + * Possible values: duration (default: ) + + * load-threshold: Maximum amount of system load that should be used by cluster nodes + * The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit + * Possible values: percentage (default: ) + + * node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + * Possible values: integer (default: ) + + * batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes + * The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. 
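(A small throttling sketch tying together the limits described above; the numbers are illustrative, not recommendations:)

    # Cap cluster-wide parallel actions and per-node live migrations
    crm_attribute --type crm_config --name batch-limit --update 30
    crm_attribute --type crm_config --name migration-limit --update 2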
+ * Possible values: integer (default: ) + + * migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + * Possible values: integer (default: ) + + * cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon + * Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). + * Possible values: nonnegative_integer (default: ) + + * stop-all-resources: Whether the cluster should stop all active resources + * Possible values: boolean (default: ) + + * stop-orphan-resources: Whether to stop resources that were removed from the configuration + * Possible values: boolean (default: ) + + * stop-orphan-actions: Whether to cancel recurring actions removed from the configuration + * Possible values: boolean (default: ) + + * pe-error-series-max: The number of scheduler inputs resulting in errors to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * pe-warn-series-max: The number of scheduler inputs resulting in warnings to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * pe-input-series-max: The number of scheduler inputs without errors or warnings to save + * Zero to disable, -1 to store unlimited. + * Possible values: integer (default: ) + + * node-health-strategy: How cluster should react to node health attributes + * Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". + * Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom" + + * node-health-base: Base health score assigned to a node + * Only used when "node-health-strategy" is set to "progressive". + * Possible values: score (default: ) + + * node-health-green: The score to use for a node health attribute whose value is "green" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * node-health-yellow: The score to use for a node health attribute whose value is "yellow" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * node-health-red: The score to use for a node health attribute whose value is "red" + * Only used when "node-health-strategy" is set to "custom" or "progressive". + * Possible values: score (default: ) + + * placement-strategy: How the cluster should allocate resources to nodes + * Possible values: "default" (default), "utilization", "minimal", "balanced" + + * ADVANCED OPTIONS: + + * election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + * Possible values: duration (default: ) + + * shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + * Possible values: duration (default: ) + + * join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug. + * Possible values: duration (default: ) + + * join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug. 
+ * Possible values: duration (default: ) + + * transition-delay: Enabling this option will slow down cluster recovery under all conditions + * Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. + * Possible values: duration (default: ) + + * stonith-enabled: Whether nodes may be fenced as part of recovery + * If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability. + * Possible values: boolean (default: ) + + * startup-fencing: Whether to fence unseen nodes at start-up + * Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability. + * Possible values: boolean (default: ) + + * DEPRECATED OPTIONS (will be removed in a future release): + + * remove-after-stop: Whether to remove stopped resources from the executor + * Values other than default are poorly tested and potentially dangerous. + * Possible values: boolean (default: ) +=#=#=#= End test: List all available cluster options - OK (0) =#=#=#= +* Passed: crm_attribute - List all available cluster options +=#=#=#= Begin test: List all available cluster options (XML) =#=#=#= + + + 1.1 + Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section. + Pacemaker cluster options + + + Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes. + Pacemaker version on cluster node elected Designated Controller (DC) + + + + Used for informational and diagnostic purposes. + The messaging layer on which Pacemaker is currently running + + + + This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents. + An arbitrary name for the cluster + + + + The optimal value will depend on the speed and load of your network and the type of switches used. + How long to wait for a response from other nodes during start-up + + + + Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min"). + Polling interval to recheck cluster state and evaluate rules with date specifications + + + + A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. + How a cluster node should react if notified of its own fencing + + + + + Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. 
+ Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + + Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug. + + + + If you need to adjust this value, it probably indicates the presence of a bug. + If you need to adjust this value, it probably indicates the presence of a bug. + + + + If you need to adjust this value, it probably indicates the presence of a bug. + If you need to adjust this value, it probably indicates the presence of a bug. + + + + Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. + Enabling this option will slow down cluster recovery under all conditions + + + + What to do when the cluster does not have quorum + What to do when the cluster does not have quorum + + + + + When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release. + Whether to lock resources to a cleanly shut down node + + + + If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined. + Do not lock resources to a cleanly shut down node longer than this + + + + Enable Access Control Lists (ACLs) for the CIB + Enable Access Control Lists (ACLs) for the CIB + + + + Whether resources can run on any node by default + Whether resources can run on any node by default + + + + Whether the cluster should refrain from monitoring, starting, and stopping resources + Whether the cluster should refrain from monitoring, starting, and stopping resources + + + + When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold. + Whether a start failure should prevent a resource from being recovered on the same node + + + + Whether the cluster should check for active resources during start-up + Whether the cluster should check for active resources during start-up + + + + If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability. 
+ Whether nodes may be fenced as part of recovery + + + + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") + + + + + How long to wait for on, off, and reboot fence actions to complete by default + How long to wait for on, off, and reboot fence actions to complete by default + + + + This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured. + Whether watchdog integration is enabled + + + + If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur. + How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use + + + + How many times fencing can fail before it will no longer be immediately re-attempted on a target + How many times fencing can fail before it will no longer be immediately re-attempted on a target + + + + Allow performing fencing operations in parallel + Allow performing fencing operations in parallel + + + + Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability. + Whether to fence unseen nodes at start-up + + + + Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled. + Apply fencing delay targeting the lost nodes with the highest total resource priority + + + + Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours. 
+ How long to wait for a node that has joined the cluster to join the controller process group + + + + The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. + Maximum time for node-to-node communication + + + + The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit. + Maximum amount of system load that should be used by cluster nodes + + + + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + Maximum number of jobs that can be scheduled per node (defaults to 2x cores) + + + + The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load. + Maximum number of jobs that the cluster may execute in parallel across all nodes + + + + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit) + + + + Raise this if the log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes). + Maximum IPC message backlog before disconnecting a cluster daemon + + + + Whether the cluster should stop all active resources + Whether the cluster should stop all active resources + + + + Whether to stop resources that were removed from the configuration + Whether to stop resources that were removed from the configuration + + + + Whether to cancel recurring actions removed from the configuration + Whether to cancel recurring actions removed from the configuration + + + + + Values other than default are poorly tested and potentially dangerous. + Whether to remove stopped resources from the executor + + + + Zero to disable, -1 to store unlimited. + The number of scheduler inputs resulting in errors to save + + + + Zero to disable, -1 to store unlimited. + The number of scheduler inputs resulting in warnings to save + + + + Zero to disable, -1 to store unlimited. + The number of scheduler inputs without errors or warnings to save + + + + Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". + How the cluster should react to node health attributes + + + + + Only used when "node-health-strategy" is set to "progressive". + Base health score assigned to a node + + + + Only used when "node-health-strategy" is set to "custom" or "progressive". + The score to use for a node health attribute whose value is "green" + + + + Only used when "node-health-strategy" is set to "custom" or "progressive". + The score to use for a node health attribute whose value is "yellow" + + + + Only used when "node-health-strategy" is set to "custom" or "progressive".
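
Under the "progressive" strategy, each "#health*" node attribute adds node-health-green, node-health-yellow, or node-health-red to the node's score, on top of node-health-base. A minimal sketch, assuming a running cluster; the scores are illustrative:

    crm_attribute --type=crm_config --name=node-health-strategy --update=progressive
    crm_attribute --type=crm_config --name=node-health-base --update=100
    crm_attribute --type=crm_config --name=node-health-yellow --update=-10
    # An external monitor would then set node attributes such as #health-smart
    # to "green", "yellow", or "red" on each node
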
+ The score to use for a node health attribute whose value is "red" + + + + How the cluster should allocate resources to nodes + How the cluster should allocate resources to nodes + + + + + + + +=#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#= +* Passed: crm_attribute - List all available cluster options (XML) =#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#= @@ -33,7 +937,7 @@ crm_attribute: Error performing operation: No such device or address - + @@ -44,14 +948,230 @@ crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#= * Passed: crm_attribute - Configure something before erasing +=#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#= +=#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#= +* Passed: cibadmin - Test '++' XML attribute update syntax +=#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#= +=#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#= +* Passed: cibadmin - Test '+=' XML attribute update syntax +=#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#= +=#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#= +* Passed: crm_attribute - Test '++' nvpair value update syntax +=#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#= + + + +=#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '++' nvpair value update syntax (XML) +=#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#= +=#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#= +* Passed: crm_attribute - Test '+=' nvpair value update syntax +=#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#= + + + +=#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '+=' nvpair value update syntax (XML) +=#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#= +=#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#= +* Passed: cibadmin - Test '++' XML attribute update syntax (--score not set) +=#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#= +=#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#= +* Passed: cibadmin - Test '+=' XML attribute update syntax (--score not set) +=#=#=#= Begin test: Test '++' 
nvpair value update syntax (--score not set) =#=#=#= +=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) +=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#= + + + +=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) (XML) +=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#= +=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) +=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#= + + + +=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#= +* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#= Begin test: Require --force for CIB erasure =#=#=#= cibadmin: The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed. =#=#=#= Current cib after: Require --force for CIB erasure =#=#=#= - + - + @@ -177,7 +1297,7 @@ cibadmin: The supplied command is considered dangerous. 
To prevent accidental de =#=#=#= Begin test: Create operation should fail =#=#=#= Call failed: File exists - + @@ -461,8 +1581,8 @@ Revised Cluster Status: =#=#=#= End test: Create second node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Create second node attribute =#=#=#= Begin test: Query node attributes by pattern =#=#=#= -scope=nodes name=ram value=1024M -scope=nodes name=rattr value=XYZ +scope=nodes name=ram value=1024M +scope=nodes name=rattr value=XYZ =#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Query node attributes by pattern =#=#=#= Begin test: Update node attributes by pattern =#=#=#= @@ -644,8 +1764,8 @@ Current cluster status: =#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a second transient node attribute =#=#=#= Begin test: Query transient node attributes by pattern =#=#=#= -scope=status name=fail-count-foo value=3 -scope=status name=fail-count-bar value=5 +scope=status name=fail-count-foo value=3 +scope=status name=fail-count-bar value=5 =#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Query transient node attributes by pattern =#=#=#= Begin test: Update transient node attributes by pattern =#=#=#= @@ -757,7 +1877,7 @@ crm_attribute: Error: must specify attribute name or pattern to delete =#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a utilization node attribute =#=#=#= Begin test: Query utilization node attribute =#=#=#= -scope=nodes name=cpu value=1 +scope=nodes name=cpu value=1 =#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query utilization node attribute =#=#=#= Begin test: Digest calculation =#=#=#= @@ -832,7 +1952,7 @@ Call failed: Update was older than existing configuration =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#= * Passed: cibadmin - Replace operation should fail =#=#=#= Begin test: Default standby value =#=#=#= -scope=status name=standby value=off +scope=status name=standby value=off =#=#=#= Current cib after: Default standby value =#=#=#= @@ -904,7 +2024,7 @@ scope=status name=standby value=off =#=#=#= End test: Set standby status - OK (0) =#=#=#= * Passed: crm_standby - Set standby status =#=#=#= Begin test: Query standby value =#=#=#= -scope=nodes name=standby value=true +scope=nodes name=standby value=true =#=#=#= Current cib after: Query standby value =#=#=#= @@ -1020,17 +2140,835 @@ crm_resource: non-option ARGV-elements: =#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource run with extra arguments -=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#= -crm_resource: --resource cannot be used with --class, --agent, and --provider -=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#= -* Passed: crm_resource - crm_resource given both -r and resource config -=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#= -crm_resource: --class, --agent, and --provider can only be used with --validate and --force-* -=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#= -* Passed: crm_resource - crm_resource given resource config with invalid action -=#=#=#= Begin test: Create a resource meta attribute =#=#=#= -unpack_resources 
error: Resource start-up disabled since no STONITH resources have been defined -unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option +=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#= +crm_resource: Error parsing option --list-options +=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#= +* Passed: crm_resource - List all available resource options (invalid type) +=#=#=#= Begin test: List all available resource options (invalid type) (XML) =#=#=#= +crm_resource: Error parsing option --list-options +=#=#=#= End test: List all available resource options (invalid type) (XML) - Incorrect usage (64) =#=#=#= +* Passed: crm_resource - List all available resource options (invalid type) (XML) +=#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#= +Primitive meta-attributes + +Meta-attributes applicable to primitive resources + + * priority: Resource assignment priority + * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. + * Possible values: score (default: ) + + * critical: Default value for influence in colocation constraints + * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. + * Possible values: boolean (default: ) + + * target-role: State the cluster should attempt to keep this resource in + * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". + * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted" + + * is-managed: Whether the cluster is allowed to actively change the resource's state + * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. + * Possible values: boolean (default: ) + + * maintenance: If true, the cluster will not schedule any actions involving the resource + * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. + * Possible values: boolean (default: ) + + * resource-stickiness: Score to add to the current node when a resource is already active + * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. + * Possible values: score (no default) + + * requires: Conditions under which the resource can be started + * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. 
"fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". + * Possible values: "nothing", "quorum", "fencing", "unfencing" + + * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there. + * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. + * Possible values: score (default: ) + + * failure-timeout: Number of seconds before acting as if a failure had not occurred + * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. + * Possible values: duration (default: ) + + * multiple-active: What to do if the cluster finds the resource active on more than one node + * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) + * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected" + + * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved + * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. + * Possible values: boolean (no default) + + * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + * Possible values: boolean (default: ) + + * container-attribute-target: Where to check user-defined node attributes + * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. 
Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). + * Possible values: string (no default) + + * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any + * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. + * Possible values: string (no default) + + * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote + * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. + * Possible values: string (no default) + + * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection + * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. + * Possible values: port (default: ) + + * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. + * Possible values: timeout (default: ) + + * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + * Possible values: boolean (default: ) +=#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#= +* Passed: crm_resource - List non-advanced primitive meta-attributes +=#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) (shows all) =#=#=#= + + + 1.1 + Meta-attributes applicable to primitive resources + Primitive meta-attributes + + + If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. + Resource assignment priority + + + + Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. + Default value for influence in colocation constraints + + + + "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". + State the cluster should attempt to keep this resource in + + + + + If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. + Whether the cluster is allowed to actively change the resource's state + + + + If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). 
If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. + If true, the cluster will not schedule any actions involving the resource + + + + Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. + Score to add to the current node when a resource is already active + + + + Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". + Conditions under which the resource can be started + + + + + Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. + Number of failures on a node before the resource becomes ineligible to run there. + + + + Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. + Number of seconds before acting as if a failure had not occurred + + + + What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) + What to do if the cluster finds the resource active on more than one node + + + + + Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. 
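
These meta-attributes are set per resource rather than cluster-wide; crm_resource with --meta is the usual front end. A minimal sketch, assuming a running cluster; the resource name dummy is hypothetical:

    crm_resource --resource=dummy --meta --set-parameter=target-role --parameter-value=Stopped
    crm_resource --resource=dummy --meta --set-parameter=migration-threshold --parameter-value=3
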
+ Whether the cluster should try to "live migrate" this resource when it needs to be moved + + + + Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + + + + Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). + Where to check user-defined node attributes + + + + Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. + Name of the Pacemaker Remote guest node this resource is associated with, if any + + + + If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. + If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote + + + + If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. + If remote-node is specified, port on the guest used for its Pacemaker Remote connection + + + + If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. + If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. + + + + If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + + + + + + +=#=#=#= End test: List non-advanced primitive meta-attributes (XML) (shows all) - OK (0) =#=#=#= +* Passed: crm_resource - List non-advanced primitive meta-attributes (XML) (shows all) +=#=#=#= Begin test: List all available primitive meta-attributes =#=#=#= +Primitive meta-attributes + +Meta-attributes applicable to primitive resources + + * priority: Resource assignment priority + * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. + * Possible values: score (default: ) + + * critical: Default value for influence in colocation constraints + * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. + * Possible values: boolean (default: ) + + * target-role: State the cluster should attempt to keep this resource in + * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). 
"Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". + * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted" + + * is-managed: Whether the cluster is allowed to actively change the resource's state + * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. + * Possible values: boolean (default: ) + + * maintenance: If true, the cluster will not schedule any actions involving the resource + * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. + * Possible values: boolean (default: ) + + * resource-stickiness: Score to add to the current node when a resource is already active + * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. + * Possible values: score (no default) + + * requires: Conditions under which the resource can be started + * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". + * Possible values: "nothing", "quorum", "fencing", "unfencing" + + * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there. + * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. + * Possible values: score (default: ) + + * failure-timeout: Number of seconds before acting as if a failure had not occurred + * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. 
+ * Possible values: duration (default: ) + + * multiple-active: What to do if the cluster finds the resource active on more than one node + * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) + * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected" + + * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved + * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. + * Possible values: boolean (no default) + + * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + * Possible values: boolean (default: ) + + * container-attribute-target: Where to check user-defined node attributes + * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). + * Possible values: string (no default) + + * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any + * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. + * Possible values: string (no default) + + * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote + * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. + * Possible values: string (no default) + + * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection + * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. + * Possible values: port (default: ) + + * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. 
+ * Possible values: timeout (default: ) + + * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + * Possible values: boolean (default: ) +=#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#= +* Passed: crm_resource - List all available primitive meta-attributes +=#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#= + + + 1.1 + Meta-attributes applicable to primitive resources + Primitive meta-attributes + + + If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. + Resource assignment priority + + + + Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. + Default value for influence in colocation constraints + + + + "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". + State the cluster should attempt to keep this resource in + + + + + If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. + Whether the cluster is allowed to actively change the resource's state + + + + If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. + If true, the cluster will not schedule any actions involving the resource + + + + Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. + Score to add to the current node when a resource is already active + + + + Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". + Conditions under which the resource can be started + + + + + Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. 
A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. + Number of failures on a node before the resource becomes ineligible to run there. + + + + Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. + Number of seconds before acting as if a failure had not occurred + + + + What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) + What to do if the cluster finds the resource active on more than one node + + + + + Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. + Whether the cluster should try to "live migrate" this resource when it needs to be moved + + + + Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it + + + + Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). + Where to check user-defined node attributes + + + + Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. + Name of the Pacemaker Remote guest node this resource is associated with, if any + + + + If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. 
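
As the remote-node and remote-addr descriptions say, a guest node is enabled purely through meta-attributes on the resource that manages the guest. A minimal CIB sketch; the IDs, the address, and the VirtualDomain agent are illustrative:

    <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
      <meta_attributes id="vm1-meta">
        <nvpair id="vm1-meta-remote-node" name="remote-node" value="guest1"/>
        <nvpair id="vm1-meta-remote-addr" name="remote-addr" value="192.168.122.10"/>
      </meta_attributes>
    </primitive>
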
+ If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote + + + + If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. + If remote-node is specified, port on the guest used for its Pacemaker Remote connection + + + + If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. + If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. + + + + If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). + + + + + + +=#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#= +* Passed: crm_resource - List all available primitive meta-attributes (XML) +=#=#=#= Begin test: List non-advanced fencing parameters =#=#=#= +Fencing resource common parameters + +Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. + + * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names. + * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. + * Possible values: string (no default) + + * pcmk_host_list: Nodes targeted by this device + * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. + * Possible values: string (no default) + + * pcmk_host_check: How to determine which nodes can be targeted by the device + * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" + * Possible values: "dynamic-list", "static-list", "status", "none" + + * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions. + * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. + * Possible values: duration (default: ) + + * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value. + * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. 
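
pcmk_host_map and pcmk_delay_base are plain instance attributes on a fencing resource. A minimal CIB sketch; the device ID and the fence_ipmilan agent are illustrative, and the values reuse the formats described above:

    <primitive id="fence-ipmi" class="stonith" type="fence_ipmilan">
      <instance_attributes id="fence-ipmi-params">
        <nvpair id="fence-ipmi-host-map" name="pcmk_host_map" value="node1:1;node2:2,3"/>
        <nvpair id="fence-ipmi-delay-base" name="pcmk_delay_base" value="node1:1s;node2:5"/>
      </instance_attributes>
    </primitive>
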
+ * Possible values: string (default: ) + + * pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device + * Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel. + * Possible values: integer (default: ) +=#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#= +* Passed: crm_resource - List non-advanced fencing parameters +=#=#=#= Begin test: List non-advanced fencing parameters (XML) (shows all) =#=#=#= + + + 1.1 + Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. + Fencing resource common parameters + + + Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters. + An alternate parameter to supply instead of 'port' + + + + For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. + A mapping of node names to port numbers for devices that do not support node names. + + + + Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. + Nodes targeted by this device + + + + Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" + How to determine which nodes can be targeted by the device + + + + + Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. + Enable a delay of no more than the time specified before executing fencing actions. + + + + This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. + Enable a base delay for fencing actions and specify base delay value. + + + + Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel. + The maximum number of actions that can be performed in parallel on this device + + + + Some devices do not support the standard commands or may provide additional ones.
Use this to specify an alternate, device-specific, command that implements the 'reboot' action. + An alternate command to run instead of 'reboot' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. + Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up. + The maximum number of times to try the 'reboot' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. + An alternate command to run instead of 'off' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. + Specify an alternate timeout to use for 'off' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up. + The maximum number of times to try the 'off' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. + An alternate command to run instead of 'on' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions. + Specify an alternate timeout to use for 'on' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up. + The maximum number of times to try the 'on' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action. + An alternate command to run instead of 'list' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. + Specify an alternate timeout to use for 'list' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. + The maximum number of times to try the 'list' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
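
Each pcmk_<action>_action/_timeout/_retries triple overrides a single fencing action, and like the parameters above they are set as instance attributes on the fencing resource. A minimal sketch extending the earlier fragment; the values are illustrative:

    <primitive id="fence-ipmi" class="stonith" type="fence_ipmilan">
      <instance_attributes id="fence-ipmi-overrides">
        <nvpair id="fence-ipmi-reboot-timeout" name="pcmk_reboot_timeout" value="120s"/>
        <nvpair id="fence-ipmi-reboot-retries" name="pcmk_reboot_retries" value="5"/>
      </instance_attributes>
    </primitive>
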
+ An alternate command to run instead of 'monitor' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. + Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. + The maximum number of times to try the 'monitor' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action. + An alternate command to run instead of 'status' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. + Specify an alternate timeout to use for 'status' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. + The maximum number of times to try the 'status' command within the timeout period + + + + + + +=#=#=#= End test: List non-advanced fencing parameters (XML) (shows all) - OK (0) =#=#=#= +* Passed: crm_resource - List non-advanced fencing parameters (XML) (shows all) +=#=#=#= Begin test: List all available fencing parameters =#=#=#= +Fencing resource common parameters + +Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. + + * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names. + * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. + * Possible values: string (no default) + + * pcmk_host_list: Nodes targeted by this device + * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. + * Possible values: string (no default) + + * pcmk_host_check: How to determine which nodes can be targeted by the device + * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" + * Possible values: "dynamic-list", "static-list", "status", "none" + + * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions. + * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. 
+ * Possible values: duration (default: )
+
+ * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
+ * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
+ * Possible values: string (default: )
+
+ * pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
+ * Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.
+ * Possible values: integer (default: )
+
+ * ADVANCED OPTIONS:
+
+ * pcmk_host_argument: An alternate parameter to supply instead of 'port'
+ * Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
+ * Possible values: string (default: )
+
+ * pcmk_reboot_action: An alternate command to run instead of 'reboot'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
+ * Possible values: string (default: )
+
+ * pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
+ * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
+ * Possible values: timeout (default: )
+
+ * pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
+ * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
+ * Possible values: integer (default: )
+
+ * pcmk_off_action: An alternate command to run instead of 'off'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
+ * Possible values: string (default: )
+
+ * pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
+ * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
+ * Possible values: timeout (default: )
+
+ * pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
+ * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
+ * Possible values: integer (default: )
+
+ * pcmk_on_action: An alternate command to run instead of 'on'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
+ * Possible values: string (default: )
+
+ * pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
+ * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
+ * Possible values: timeout (default: )
+
+ * pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
+ * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
+ * Possible values: integer (default: )
+
+ * pcmk_list_action: An alternate command to run instead of 'list'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
+ * Possible values: string (default: )
+
+ * pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
+ * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
+ * Possible values: timeout (default: )
+
+ * pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
+ * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
+ * Possible values: integer (default: )
+
+ * pcmk_monitor_action: An alternate command to run instead of 'monitor'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
+ * Possible values: string (default: )
+
+ * pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
+ * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
+ * Possible values: timeout (default: )
+
+ * pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
+ * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
+ * Possible values: integer (default: )
+
+ * pcmk_status_action: An alternate command to run instead of 'status'
+ * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
+ * Possible values: string (default: ) + + * pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout + * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. + * Possible values: timeout (default: ) + + * pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period + * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. + * Possible values: integer (default: ) +=#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#= +* Passed: crm_resource - List all available fencing parameters +=#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#= + + + 1.1 + Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library. + Fencing resource common parameters + + + Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters. + An alternate parameter to supply instead of 'port' + + + + For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2. + A mapping of node names to port numbers for devices that do not support node names. + + + + Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set. + Nodes targeted by this device + + + + Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" + How to determine which nodes can be targeted by the device + + + + + Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. + Enable a delay of no more than the time specified before executing fencing actions. + + + + This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target. + Enable a base delay for fencing actions and specify base delay value. + + + + Cluster property concurrent-fencing="true" needs to be configured first. 
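As a sketch of how the host-map and delay parameters described above are used in practice: they are ordinary instance attributes of a stonith resource, so they can be set with crm_resource. The resource name "Fence" is borrowed from the stonith resource used later in these tests, and the values are the examples quoted in the descriptions.

    crm_resource --resource Fence --set-parameter pcmk_host_map \
        --parameter-value "node1:1;node2:2,3"
    crm_resource --resource Fence --set-parameter pcmk_delay_base \
        --parameter-value "node1:1s;node2:5"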
Then use this to specify the maximum number of actions can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel. + The maximum number of actions can be performed in parallel on this device + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action. + An alternate command to run instead of 'reboot' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions. + Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up. + The maximum number of times to try the 'reboot' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action. + An alternate command to run instead of 'off' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions. + Specify an alternate timeout to use for 'off' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up. + The maximum number of times to try the 'off' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action. + An alternate command to run instead of 'on' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions. + Specify an alternate timeout to use for 'on' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up. + The maximum number of times to try the 'on' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action. + An alternate command to run instead of 'list' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions. + Specify an alternate timeout to use for 'list' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. 
Use this option to alter the number of times Pacemaker tries a 'list' action before giving up. + The maximum number of times to try the 'list' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action. + An alternate command to run instead of 'monitor' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions. + Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up. + The maximum number of times to try the 'monitor' command within the timeout period + + + + Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action. + An alternate command to run instead of 'status' + + + + Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions. + Specify an alternate timeout to use for 'status' actions instead of stonith-timeout + + + + Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up. 
+ The maximum number of times to try the 'status' command within the timeout period + + + + + + +=#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#= +* Passed: crm_resource - List all available fencing parameters (XML) +=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#= +crm_resource: --resource cannot be used with --class, --agent, and --provider +=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#= +* Passed: crm_resource - crm_resource given both -r and resource config +=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#= +crm_resource: --class, --agent, and --provider can only be used with --validate and --force-* +=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#= +* Passed: crm_resource - crm_resource given resource config with invalid action +=#=#=#= Begin test: Create a resource meta attribute =#=#=#= +unpack_resources error: Resource start-up disabled since no STONITH resources have been defined +unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= @@ -1166,6 +3104,13 @@ unpack_resources error: Resource start-up disabled since no STONITH resources h unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity + + + + + + + =#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#= @@ -2020,12 +3965,321 @@ Revised Cluster Status: -=#=#=#= End test: Bring resources online - OK (0) =#=#=#= -* Passed: crm_simulate - Bring resources online -=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= -crm_resource: Error performing operation: Requested item already exists -=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= - +=#=#=#= End test: Bring resources online - OK (0) =#=#=#= +* Passed: crm_simulate - Bring resources online +=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= +crm_resource: Error performing operation: Requested item already exists +=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= +* Passed: crm_resource - Try to move a resource to its existing location +=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= +crm_resource: Resource 'xyz' not found +Error performing operation: No such object +=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= +* Passed: crm_resource - Try to move a resource that doesn't exist +=#=#=#= Begin test: Move a resource from its existing location =#=#=#= +WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. + This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. 
+ This will be the case even if node1 is the last node in the cluster +=#=#=#= Current cib after: Move a resource from its existing location =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= +* Passed: crm_resource - Move a resource from its existing location +=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= +Removing constraint: cli-ban-dummy-on-node1 +=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= +* Passed: crm_resource - Clear out constraints generated by --move +=#=#=#= Begin test: Default ticket granted state =#=#=#= +false +=#=#=#= Current cib after: Default ticket granted state =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#= +* Passed: crm_ticket - Default ticket granted state +=#=#=#= Begin test: Set ticket granted state =#=#=#= +=#=#=#= Current cib after: Set ticket granted state =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#= +* Passed: crm_ticket - Set ticket granted state +=#=#=#= Begin test: List ticket IDs =#=#=#= +ticketA +=#=#=#= End test: List ticket IDs - OK (0) =#=#=#= +* Passed: crm_ticket - List ticket IDs +=#=#=#= Begin test: List ticket IDs, outputting in XML =#=#=#= + + + + + + +=#=#=#= End test: List ticket IDs, outputting in XML - OK (0) =#=#=#= +* Passed: crm_ticket - List ticket IDs, outputting in XML +=#=#=#= Begin test: Query ticket state =#=#=#= +State XML: + + +=#=#=#= End test: Query ticket state - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket state +=#=#=#= Begin test: Query ticket state, outputting as xml =#=#=#= + + + + + + +=#=#=#= End test: Query ticket state, outputting as xml - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket state, outputting as xml +=#=#=#= Begin test: Query ticket granted state =#=#=#= +false +=#=#=#= Current cib after: Query ticket granted state =#=#=#= + @@ -2072,21 +4326,27 @@ crm_resource: Error performing operation: Requested item already exists + + + -=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= -* Passed: crm_resource - Try to move a resource to its existing location -=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= -crm_resource: Resource 'xyz' not found -Error performing operation: No such object -=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= -* Passed: crm_resource - Try to move a resource that doesn't exist -=#=#=#= Begin test: Move a resource from its existing location =#=#=#= -WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. 
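The move/ban/clear cycle exercised by the surrounding tests corresponds to commands along these lines (a sketch; the resource and node names are the ones used in the tests). --move and --ban create the cli-prefer-* and cli-ban-*-on-* location constraints shown above, and --clear removes them again:

    crm_resource --resource dummy --move --node node2
    crm_resource --resource dummy --ban --node node1
    crm_resource --resource dummy --clear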
- This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool - This will be the case even if node1 is the last node in the cluster -=#=#=#= Current cib after: Move a resource from its existing location =#=#=#= - +=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket granted state +=#=#=#= Begin test: Query ticket granted state, outputting as xml =#=#=#= + + + + + + + + +=#=#=#= End test: Query ticket granted state, outputting as xml - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket granted state, outputting as xml +=#=#=#= Begin test: Delete ticket granted state =#=#=#= +=#=#=#= Current cib after: Delete ticket granted state =#=#=#= + @@ -2115,9 +4375,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score - - - + @@ -2135,14 +4393,16 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score + + + -=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= -* Passed: crm_resource - Move a resource from its existing location -=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= -Removing constraint: cli-ban-dummy-on-node1 -=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= - +=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#= +* Passed: crm_ticket - Delete ticket granted state +=#=#=#= Begin test: Make a ticket standby =#=#=#= +=#=#=#= Current cib after: Make a ticket standby =#=#=#= + @@ -2189,14 +4449,17 @@ Removing constraint: cli-ban-dummy-on-node1 + + + -=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= -* Passed: crm_resource - Clear out constraints generated by --move -=#=#=#= Begin test: Default ticket granted state =#=#=#= -false -=#=#=#= Current cib after: Default ticket granted state =#=#=#= - +=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#= +* Passed: crm_ticket - Make a ticket standby +=#=#=#= Begin test: Query ticket standby state =#=#=#= +true +=#=#=#= Current cib after: Query ticket standby state =#=#=#= + @@ -2243,13 +4506,16 @@ false + + + -=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#= -* Passed: crm_ticket - Default ticket granted state -=#=#=#= Begin test: Set ticket granted state =#=#=#= -=#=#=#= Current cib after: Set ticket granted state =#=#=#= - +=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket standby state +=#=#=#= Begin test: Activate a ticket =#=#=#= +=#=#=#= Current cib after: Activate a ticket =#=#=#= + @@ -2297,16 +4563,29 @@ false - + -=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#= -* Passed: crm_ticket - Set ticket granted state -=#=#=#= Begin test: Query ticket granted state =#=#=#= +=#=#=#= End test: Activate a ticket - OK (0) =#=#=#= +* Passed: crm_ticket - Activate a ticket +=#=#=#= Begin test: List ticket details =#=#=#= +ticketA revoked (standby=false) +=#=#=#= End test: List ticket details - OK (0) =#=#=#= +* Passed: crm_ticket - List ticket details +=#=#=#= Begin test: List ticket details, outputting as XML =#=#=#= + + + + + + +=#=#=#= End test: List ticket details, outputting as XML - OK (0) =#=#=#= +* Passed: crm_ticket - List ticket details, outputting as XML +=#=#=#= Begin test: Add a second ticket =#=#=#= false -=#=#=#= Current cib after: Query ticket granted state =#=#=#= - +=#=#=#= Current cib after: Add a second ticket =#=#=#= + @@ -2354,15 +4633,15 @@ false - + 
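The ticket tests in this stretch map onto crm_ticket invocations roughly as follows (a sketch, assuming the long option names of the crm_ticket build under test):

    crm_ticket --ticket ticketA --grant
    crm_ticket --ticket ticketA --get-attr granted    # prints "true"
    crm_ticket --ticket ticketA --standby
    crm_ticket --ticket ticketA --revoke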
-=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#= -* Passed: crm_ticket - Query ticket granted state -=#=#=#= Begin test: Delete ticket granted state =#=#=#= -=#=#=#= Current cib after: Delete ticket granted state =#=#=#= - +=#=#=#= End test: Add a second ticket - OK (0) =#=#=#= +* Passed: crm_ticket - Add a second ticket +=#=#=#= Begin test: Set second ticket granted state =#=#=#= +=#=#=#= Current cib after: Set second ticket granted state =#=#=#= + @@ -2410,15 +4689,31 @@ false - + + -=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#= -* Passed: crm_ticket - Delete ticket granted state -=#=#=#= Begin test: Make a ticket standby =#=#=#= -=#=#=#= Current cib after: Make a ticket standby =#=#=#= - +=#=#=#= End test: Set second ticket granted state - OK (0) =#=#=#= +* Passed: crm_ticket - Set second ticket granted state +=#=#=#= Begin test: List tickets =#=#=#= +ticketA revoked +ticketB revoked +=#=#=#= End test: List tickets - OK (0) =#=#=#= +* Passed: crm_ticket - List tickets +=#=#=#= Begin test: List tickets, outputting as XML =#=#=#= + + + + + + + +=#=#=#= End test: List tickets, outputting as XML - OK (0) =#=#=#= +* Passed: crm_ticket - List tickets, outputting as XML +=#=#=#= Begin test: Delete second ticket =#=#=#= +=#=#=#= Current cib after: Delete second ticket =#=#=#= + @@ -2466,16 +4761,15 @@ false - + -=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#= -* Passed: crm_ticket - Make a ticket standby -=#=#=#= Begin test: Query ticket standby state =#=#=#= -true -=#=#=#= Current cib after: Query ticket standby state =#=#=#= - +=#=#=#= End test: Delete second ticket - OK (0) =#=#=#= +* Passed: cibadmin - Delete second ticket +=#=#=#= Begin test: Delete ticket standby state =#=#=#= +=#=#=#= Current cib after: Delete ticket standby state =#=#=#= + @@ -2523,15 +4817,15 @@ true - + -=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#= -* Passed: crm_ticket - Query ticket standby state -=#=#=#= Begin test: Activate a ticket =#=#=#= -=#=#=#= Current cib after: Activate a ticket =#=#=#= - +=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= +* Passed: crm_ticket - Delete ticket standby state +=#=#=#= Begin test: Delete ticket standby state =#=#=#= +=#=#=#= Current cib after: Delete ticket standby state =#=#=#= + @@ -2560,7 +4854,9 @@ true - + + + @@ -2579,15 +4875,37 @@ true - + -=#=#=#= End test: Activate a ticket - OK (0) =#=#=#= -* Passed: crm_ticket - Activate a ticket -=#=#=#= Begin test: Delete ticket standby state =#=#=#= -=#=#=#= Current cib after: Delete ticket standby state =#=#=#= - +=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= +* Passed: cibadmin - Delete ticket standby state +=#=#=#= Begin test: Query ticket constraints =#=#=#= +Constraints XML: + + +=#=#=#= End test: Query ticket constraints - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket constraints +=#=#=#= Begin test: Query ticket constraints, outputting as xml =#=#=#= + + + + + + + + + + + + + +=#=#=#= End test: Query ticket constraints, outputting as xml - OK (0) =#=#=#= +* Passed: crm_ticket - Query ticket constraints, outputting as xml +=#=#=#= Begin test: Delete ticket constraint =#=#=#= +=#=#=#= Current cib after: Delete ticket constraint =#=#=#= + @@ -2639,13 +4957,13 @@ true -=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= -* Passed: crm_ticket - Delete ticket standby state +=#=#=#= End test: Delete ticket constraint - OK (0) =#=#=#= +* Passed: cibadmin - Delete ticket constraint =#=#=#= Begin test: Ban a resource on unknown 
node =#=#=#= crm_resource: Node 'host1' not found Error performing operation: No such object =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#= - + @@ -2731,7 +5049,7 @@ Revised Cluster Status: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= - + @@ -2813,10 +5131,10 @@ Revised Cluster Status: * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. - This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool + This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= - + @@ -2912,7 +5230,7 @@ Locations: =#=#=#= Current cib after: Ban dummy from node2 =#=#=#= - + @@ -3019,7 +5337,7 @@ Revised Cluster Status: * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= - + @@ -3107,7 +5425,7 @@ Revised Cluster Status: =#=#=#= Current cib after: Move dummy to node1 =#=#=#= - + @@ -3193,7 +5511,7 @@ Revised Cluster Status: =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= - + @@ -3285,7 +5603,7 @@ Removing constraint: cli-ban-dummy-on-node2 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= - + @@ -3333,7 +5651,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -3390,7 +5708,7 @@ Multiple attributes match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= - + @@ -3442,7 +5760,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= - + @@ -3498,7 +5816,7 @@ Multiple attributes match name=is-managed Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= - + @@ -3555,7 +5873,7 @@ Multiple attributes 
match name=is-managed A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= - + @@ -3606,7 +5924,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= - + @@ -3654,7 +5972,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= - + @@ -3705,7 +6023,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=te A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= - + @@ -3755,7 +6073,7 @@ Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=i =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= - + @@ -3810,7 +6128,7 @@ Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= - + @@ -3859,7 +6177,7 @@ Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-ma * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= - + @@ -3915,7 +6233,7 @@ Multiple attributes match name=is-managed Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= - + @@ -3964,7 +6282,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= - + @@ -4018,7 +6336,7 @@ Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed na =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= - + @@ -4077,7 +6395,7 @@ Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attrib Set 'dummy1' option: 
id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= - + @@ -4137,7 +6455,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= - + @@ -4187,7 +6505,7 @@ Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-gr =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= - + @@ -4241,7 +6559,7 @@ Migration will take effect until: * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= - + @@ -4291,10 +6609,10 @@ Migration will take effect until: =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= Migration will take effect until: WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. - This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool + This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= - + @@ -4350,7 +6668,7 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= - + @@ -4400,7 +6718,7 @@ Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= - + @@ -4447,7 +6765,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= - + @@ -4495,7 +6813,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= - + @@ -4556,7 +6874,7 @@ Removing constraint: cli-prefer-dummy * Passed: crm_resource - Show why a resource is not running on an unhealthy node =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= - + @@ -4609,8 +6927,8 @@ Removing constraint: cli-prefer-dummy - - + + @@ -4628,7 +6946,7 @@ Removing constraint: cli-prefer-dummy - + @@ -4636,7 +6954,7 @@ Removing constraint: cli-prefer-dummy - + @@ -4645,9 +6963,9 @@ Removing constraint: cli-prefer-dummy - - - + + + @@ -5225,11 +7543,36 @@ Resources colocated with gr2: A new shadow instance was created. 
To begin using it, enter the following into your shell: export CIB_shadow=cts-cli =#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it =#=#=#= -Set 'prim5' option: id=prim5-meta_attributes-target-role set=prim5-meta_attributes name=target-role value=Stopped -Set 'prim4' option: id=prim4-meta_attributes-target-role set=prim4-meta_attributes name=target-role value=Stopped -Set 'prim10' option: id=prim10-meta_attributes-target-role set=prim10-meta_attributes name=target-role value=Stopped -Set 'prim3' option: id=prim3-meta_attributes-target-role set=prim3-meta_attributes name=target-role value=Stopped -Set 'prim2' option: id=prim2-meta_attributes-target-role set=prim2-meta_attributes name=target-role value=Stopped + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + =#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it =#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#= @@ -5238,8 +7581,21 @@ Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attribut =#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for group and resource colocated with it =#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it =#=#=#= -Set 'clone' option: id=clone-meta_attributes-target-role set=clone-meta_attributes name=target-role value=Stopped -Set 'prim9' option: id=prim9-meta_attributes-target-role set=prim9-meta_attributes name=target-role value=Stopped + + + + + + + + + + + + + + + =#=#=#= End test: Set a meta-attribute for clone and resource colocated with it - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it =#=#=#= Begin test: Show resource digests =#=#=#= @@ -5705,7 +8061,7 @@ Started: [ cluster01 cluster02 ] Promoted: [ cluster02 ] Unpromoted: [ cluster01 ] -Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 +Only 'private' parameters to 1m-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 Original: cluster01 capacity: Original: cluster02 capacity: Original: httpd-bundle-0 capacity: @@ -5774,7 +8130,7 @@ Current cluster status: Performing Requested Modifications: * Injecting ping_monitor_10000@cluster02=1 into the configuration - * Injecting attribute fail-count-ping#monitor_10000=value++ into /node_state '2' + * Injecting attribute fail-count-ping#monitor_10000=1 into /node_state '2' * Injecting attribute last-failure-ping#monitor_10000= into /node_state '2' Transition Summary: @@ -6142,7 +8498,7 @@ crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#= -scope=status name=master-promotable-rsc value=1 +scope=status name=master-promotable-rsc value=1 =#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a nonexistent promotable score attribute =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute 
(XML) =#=#=#= @@ -6162,7 +8518,7 @@ scope=status name=master-promotable-rsc value=1 =#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update an existing promotable score attribute (XML) =#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#= -scope=status name=master-promotable-rsc value=5 +scope=status name=master-promotable-rsc value=5 =#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating an existing promotable score attribute =#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#= @@ -6206,7 +8562,7 @@ crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update a promotable score attribute to -INFINITY (XML) =#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#= -scope=status name=master-promotable-rsc value=-INFINITY +scope=status name=master-promotable-rsc value=-INFINITY =#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY =#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#= @@ -6217,7 +8573,7 @@ scope=status name=master-promotable-rsc value=-INFINITY =#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#= -scope=status name=master-promotable-rsc value=-INFINITY +scope=status name=master-promotable-rsc value=-INFINITY =#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#= * Passed: crm_attribute - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#= @@ -7048,7 +9404,7 @@ Diff: +++ 1.4.1 (null) - + @@ -7667,7 +10023,7 @@ Diff: +++ 0.1.0 (null) -- /cib/status/node_state[@id='1'] -- /cib/status/node_state[@id='httpd-bundle-0'] -- /cib/status/node_state[@id='httpd-bundle-1'] -+ /cib: @crm_feature_set=3.19.0, @num_updates=0, @admin_epoch=0 ++ /cib: @validate-with=pacemaker-X, @num_updates=0, @admin_epoch=0 -- /cib: @cib-last-written, @update-origin, @update-client, @update-user, @have-quorum, @dc-uuid =#=#=#= End test: Get active shadow instance's diff (empty CIB) - Error occurred (1) =#=#=#= * Passed: crm_shadow - Get active shadow instance's diff (empty CIB) @@ -7679,29 +10035,30 @@ Diff: +++ 0.1.0 (null) - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - - + + + + - + + @@ -7898,49 +10255,63 @@ crm_shadow: Could not access shadow instance 'cts-cli': No such file or director =#=#=#= End test: Switch to nonexistent shadow instance (force) (XML) - No such object (105) =#=#=#= * Passed: crm_shadow - Switch to nonexistent shadow instance (force) (XML) -=#=#=#= Begin test: Verify a file-specified invalid configuration, outputting as xml =#=#=#= +=#=#=#= Begin test: Verify a file-specified invalid configuration (text output) =#=#=#= +Errors found during check: config not valid +-V may provide more details +=#=#=#= End test: Verify a 
file-specified invalid configuration (text output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (text output) +=#=#=#= Begin test: Verify a file-specified invalid configuration (verbose text output) =#=#=#= +unpack_config warning: Blind faith: not fencing unseen nodes +Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource +Ignoring resource 'test2-clone' because configuration is invalid +CIB did not pass schema validation +Errors found during check: config not valid +=#=#=#= End test: Verify a file-specified invalid configuration (verbose text output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (verbose text output) +=#=#=#= Begin test: Verify a file-specified invalid configuration (quiet text output) =#=#=#= +=#=#=#= End test: Verify a file-specified invalid configuration (quiet text output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (quiet text output) +=#=#=#= Begin test: Verify a file-specified invalid configuration (XML output) =#=#=#= Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource Ignoring <clone> resource 'test2-clone' because configuration is invalid - crm_verify: Errors found during check: config not valid + CIB did not pass schema validation + Errors found during check: config not valid -=#=#=#= End test: Verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= -* Passed: crm_verify - Verify a file-specified invalid configuration, outputting as xml -=#=#=#= Begin test: Verify another file-specified invalid configuration, outputting as xml =#=#=#= - +=#=#=#= End test: Verify a file-specified invalid configuration (XML output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (XML output) +=#=#=#= Begin test: Verify a file-specified invalid configuration (verbose XML output) =#=#=#= +unpack_config warning: Blind faith: not fencing unseen nodes + - Resource start-up disabled since no STONITH resources have been defined - Either configure some or disable STONITH with the stonith-enabled option - NOTE: Clusters with shared data need STONITH to ensure data integrity - Node pcmk-1 is unclean but cannot be fenced - Node pcmk-2 is unclean but cannot be fenced - crm_verify: Errors found during check: config not valid + Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource + Ignoring <clone> resource 'test2-clone' because configuration is invalid + CIB did not pass schema validation + Errors found during check: config not valid -=#=#=#= End test: Verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= -* Passed: crm_verify - Verify another file-specified invalid configuration, outputting as xml -=#=#=#= Begin test: Verbosely verify a file-specified invalid configuration, outputting as xml =#=#=#= -unpack_config warning: Blind faith: not fencing unseen nodes - +=#=#=#= End test: Verify a file-specified invalid configuration (verbose XML output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (verbose XML output) +=#=#=#= Begin test: Verify a file-specified invalid configuration (quiet XML output) =#=#=#= + Resource test2:0 is of type systemd and therefore cannot be 
used as a promotable clone resource Ignoring <clone> resource 'test2-clone' because configuration is invalid - crm_verify: Errors found during check: config not valid + CIB did not pass schema validation -=#=#=#= End test: Verbosely verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= -* Passed: crm_verify - Verbosely verify a file-specified invalid configuration, outputting as xml -=#=#=#= Begin test: Verbosely verify another file-specified invalid configuration, outputting as xml =#=#=#= -(cluster_status@status.c:113) warning: Fencing and resource management disabled due to lack of quorum - +=#=#=#= End test: Verify a file-specified invalid configuration (quiet XML output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify a file-specified invalid configuration (quiet XML output) +=#=#=#= Begin test: Verify another file-specified invalid configuration (XML output) =#=#=#= + Resource start-up disabled since no STONITH resources have been defined @@ -7948,12 +10319,13 @@ unpack_config warning: Blind faith: not fencing unseen nodes NOTE: Clusters with shared data need STONITH to ensure data integrity Node pcmk-1 is unclean but cannot be fenced Node pcmk-2 is unclean but cannot be fenced - crm_verify: Errors found during check: config not valid + CIB did not pass schema validation + Errors found during check: config not valid -=#=#=#= End test: Verbosely verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#= -* Passed: crm_verify - Verbosely verify another file-specified invalid configuration, outputting as xml +=#=#=#= End test: Verify another file-specified invalid configuration (XML output) - Invalid configuration (78) =#=#=#= +* Passed: crm_verify - Verify another file-specified invalid configuration (XML output) =#=#=#= Begin test: Verify a file-specified valid configuration, outputting as xml =#=#=#= diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp index d1aeeb5..00ee754 100644 --- a/cts/cli/regression.upgrade.exp +++ b/cts/cli/regression.upgrade.exp @@ -57,47 +57,28 @@ A new shadow instance was created. To begin using it, enter the following into y =#=#=#= End test: Configure the initial resource - OK (0) =#=#=#= * Passed: cibadmin - Configure the initial resource =#=#=#= Begin test: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#= -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) -update_validation debug: Upgrading pacemaker-2.10-style configuration to pacemaker-3.0 with upgrade-2.10.xsl -apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, pre-upgrade phase with upgrade-2.10-enter.xsl -apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, main phase with upgrade-2.10.xsl +pcmk__update_schema debug: Schema pacemaker-2.10 validates +apply_upgrade debug: Upgrading schema from pacemaker-2.10 to pacemaker-3.0: applying pre-upgrade XSL transform upgrade-2.10-enter +apply_upgrade debug: Upgrading schema from pacemaker-2.10 to pacemaker-3.0: applying upgrade XSL transform upgrade-2.10 INFO: Resources-operation instance_attributes: mySmartFuse-monitor-inputpower (rsc=mySmartFuse, meta=mySmartFuse-inputpower-instanceparams): dropping requires INFO: Resources-operation instance_attributes: ... 
only start/promote operation taken into account INFO: Resources-operation instance_attributes: mySmartFuse-monitor-outputpower (rsc=mySmartFuse, meta=mySmartFuse-outputpower-instanceparams): dropping requires INFO: Resources-operation instance_attributes: ... only start/promote operation taken into account -apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, post-upgrade phase with upgrade-2.10-leave.xsl +apply_upgrade debug: Upgrading schema from pacemaker-2.10 to pacemaker-3.0: applying post-upgrade XSL transform upgrade-2.10-leave DEBUG: instance_attributes: original element pointed to with @id-ref (mySmartFuse-outputpower-instanceparams) disappeared during upgrade -update_validation info: Transformation upgrade-2.10.xsl successful -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) -update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.1 -update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.2 -update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.3 -update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.4 -update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.5 -update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.6 -update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 -update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.7 -update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.8 -update_validation debug: pacemaker-3.8-style configuration is also valid for pacemaker-3.9 -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.9 -update_validation trace: Stopping at pacemaker-3.9 -update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.9 +apply_upgrade info: Schema upgrade from pacemaker-2.10 to pacemaker-3.0 succeeded +pcmk__update_schema debug: Schema pacemaker-3.0 validates +pcmk__update_schema debug: Schema pacemaker-3.1 validates +pcmk__update_schema debug: Schema pacemaker-3.2 validates +pcmk__update_schema debug: Schema pacemaker-3.3 validates +pcmk__update_schema debug: Schema pacemaker-3.4 validates +pcmk__update_schema debug: Schema pacemaker-3.5 validates +pcmk__update_schema debug: 
Schema pacemaker-3.6 validates +pcmk__update_schema debug: Schema pacemaker-3.7 validates +pcmk__update_schema debug: Schema pacemaker-3.8 validates +pcmk__update_schema debug: Schema pacemaker-3.9 validates +pcmk__update_schema debug: Schema pacemaker-3.10 validates +pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.10 =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#= diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp index 3a62ab4..f020b20 100644 --- a/cts/cli/regression.validity.exp +++ b/cts/cli/regression.validity.exp @@ -2,21 +2,6 @@ Created new pacemaker configuration A new shadow instance was created. To begin using it, enter the following into your shell: export CIB_shadow=cts-cli =#=#=#= Begin test: Try to make resulting CIB invalid (enum violation) =#=#=#= - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid (enum violation) =#=#=#= @@ -36,117 +21,82 @@ Call failed: Update does not conform to the configured schema =#=#=#= End test: Try to make resulting CIB invalid (enum violation) - Invalid configuration (78) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid (enum violation) =#=#=#= Begin test: Run crm_simulate with invalid CIB (enum violation) =#=#=#= -update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-1.2 validation failed -update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) +pcmk__update_schema debug: Schema pacemaker-1.2 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-1.3 validation failed -update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) +pcmk__update_schema debug: Schema pacemaker-1.3 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.0 validation failed -update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) +pcmk__update_schema debug: Schema pacemaker-2.0 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.1 validation failed -update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) +pcmk__update_schema debug: Schema pacemaker-2.1 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.2 validation failed -update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) +pcmk__update_schema debug: Schema pacemaker-2.2 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element 
rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.3 validation failed -update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) +pcmk__update_schema debug: Schema pacemaker-2.3 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.4 validation failed -update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) +pcmk__update_schema debug: Schema pacemaker-2.4 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.5 validation failed -update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) +pcmk__update_schema debug: Schema pacemaker-2.5 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.6 validation failed -update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) +pcmk__update_schema debug: Schema pacemaker-2.6 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.7 validation failed -update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) +pcmk__update_schema debug: Schema pacemaker-2.7 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.8 validation failed -update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) +pcmk__update_schema debug: Schema pacemaker-2.8 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.9 validation failed -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) +pcmk__update_schema debug: Schema pacemaker-2.9 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-2.10 validation failed -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) +pcmk__update_schema debug: Schema pacemaker-2.10 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.0 validation failed -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) +pcmk__update_schema debug: Schema pacemaker-3.0 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has 
extra content: rsc_order -update_validation trace: pacemaker-3.1 validation failed -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) +pcmk__update_schema debug: Schema pacemaker-3.1 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.2 validation failed -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) +pcmk__update_schema debug: Schema pacemaker-3.2 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.3 validation failed -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) +pcmk__update_schema debug: Schema pacemaker-3.3 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.4 validation failed -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) +pcmk__update_schema debug: Schema pacemaker-3.4 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.5 validation failed -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) +pcmk__update_schema debug: Schema pacemaker-3.5 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.6 validation failed -update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) +pcmk__update_schema debug: Schema pacemaker-3.6 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.7 validation failed -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) +pcmk__update_schema debug: Schema pacemaker-3.7 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.8 validation failed -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) +pcmk__update_schema debug: Schema pacemaker-3.8 does not validate element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order -update_validation trace: pacemaker-3.9 validation failed -Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.9 +pcmk__update_schema debug: Schema pacemaker-3.9 does not validate +element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order +element rsc_order: Relax-NG 
validity error : Element constraints has extra content: rsc_order +pcmk__update_schema debug: Schema pacemaker-3.10 does not validate +Cannot upgrade configuration (claiming pacemaker-1.2 schema) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to the latest =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation) =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= @@ -166,99 +116,60 @@ Call failed: Update does not conform to the configured schema =#=#=#= End test: Try to make resulting CIB invalid (unrecognized validate-with) - Invalid configuration (78) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#= Begin test: Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#= -update_validation debug: Unknown validation schema -update_validation debug: Testing 'pacemaker-1.0' validation (0 of X) element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-1.0 validation failed -update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) +pcmk__update_schema debug: Schema pacemaker-1.0 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-1.2 validation failed -update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) +pcmk__update_schema debug: Schema pacemaker-1.2 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-1.3 validation failed -update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) +pcmk__update_schema debug: Schema pacemaker-1.3 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.0 validation failed -update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) +pcmk__update_schema debug: Schema pacemaker-2.0 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.1 validation failed -update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) +pcmk__update_schema debug: Schema pacemaker-2.1 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.2 validation failed -update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) +pcmk__update_schema debug: Schema pacemaker-2.2 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.3 validation failed -update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) +pcmk__update_schema debug: Schema pacemaker-2.3 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.4 validation failed -update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) +pcmk__update_schema debug: Schema pacemaker-2.4 does not validate 
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.5 validation failed -update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) +pcmk__update_schema debug: Schema pacemaker-2.5 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.6 validation failed -update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) +pcmk__update_schema debug: Schema pacemaker-2.6 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.7 validation failed -update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) +pcmk__update_schema debug: Schema pacemaker-2.7 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.8 validation failed -update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) +pcmk__update_schema debug: Schema pacemaker-2.8 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.9 validation failed -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) +pcmk__update_schema debug: Schema pacemaker-2.9 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-2.10 validation failed -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) +pcmk__update_schema debug: Schema pacemaker-2.10 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.0 validation failed -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) +pcmk__update_schema debug: Schema pacemaker-3.0 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.1 validation failed -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) +pcmk__update_schema debug: Schema pacemaker-3.1 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.2 validation failed -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) +pcmk__update_schema debug: Schema pacemaker-3.2 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.3 validation failed -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) +pcmk__update_schema debug: Schema pacemaker-3.3 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.4 validation failed -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) +pcmk__update_schema debug: Schema pacemaker-3.4 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.5 validation failed -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) +pcmk__update_schema debug: Schema pacemaker-3.5 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.6 validation failed 
-update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) +pcmk__update_schema debug: Schema pacemaker-3.6 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.7 validation failed -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) +pcmk__update_schema debug: Schema pacemaker-3.7 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.8 validation failed -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) +pcmk__update_schema debug: Schema pacemaker-3.8 does not validate element cib: Relax-NG validity error : Invalid attribute validate-with for element cib -update_validation trace: pacemaker-3.9 validation failed -Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.9 +pcmk__update_schema debug: Schema pacemaker-3.9 does not validate +element cib: Relax-NG validity error : Invalid attribute validate-with for element cib +pcmk__update_schema debug: Schema pacemaker-3.10 does not validate +Cannot upgrade configuration (claiming pacemaker-9999.0 schema) to at least pacemaker-3.0 because it does not validate with any schema from the first to the latest =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#= * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12 - 13 - 14 - 15 - 16 Call failed: Update does not conform to the configured schema =#=#=#= Current cib after: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= @@ -278,75 +189,32 @@ Call failed: Update does not conform to the configured schema =#=#=#= End test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) - Invalid configuration (78) =#=#=#= * Passed: cibadmin - Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#= Begin test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#= -update_validation debug: Testing 'pacemaker-1.2' validation (1 of X) element tags: Relax-NG validity error : Element configuration has extra content: tags -update_validation trace: pacemaker-1.2 validation failed -update_validation debug: Testing 'pacemaker-1.3' validation (2 of X) -update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0 -update_validation debug: Testing 'pacemaker-2.0' validation (3 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.0 -update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1 -update_validation debug: Testing 'pacemaker-2.1' validation (4 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.1 -update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2 -update_validation debug: Testing 'pacemaker-2.2' validation (5 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.2 -update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3 -update_validation debug: Testing 'pacemaker-2.3' validation (6 of X) -update_validation 
debug: Configuration valid for schema: pacemaker-2.3 -update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4 -update_validation debug: Testing 'pacemaker-2.4' validation (7 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.4 -update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5 -update_validation debug: Testing 'pacemaker-2.5' validation (8 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.5 -update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6 -update_validation debug: Testing 'pacemaker-2.6' validation (9 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.6 -update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7 -update_validation debug: Testing 'pacemaker-2.7' validation (10 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.7 -update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8 -update_validation debug: Testing 'pacemaker-2.8' validation (11 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.8 -update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9 -update_validation debug: Testing 'pacemaker-2.9' validation (12 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.9 -update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10 -update_validation debug: Testing 'pacemaker-2.10' validation (13 of X) -update_validation debug: Configuration valid for schema: pacemaker-2.10 -update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0 -update_validation debug: Testing 'pacemaker-3.0' validation (14 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.0 -update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1 -update_validation debug: Testing 'pacemaker-3.1' validation (15 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.1 -update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2 -update_validation debug: Testing 'pacemaker-3.2' validation (16 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.2 -update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3 -update_validation debug: Testing 'pacemaker-3.3' validation (17 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.3 -update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4 -update_validation debug: Testing 'pacemaker-3.4' validation (18 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.4 -update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5 -update_validation debug: Testing 'pacemaker-3.5' validation (19 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.5 -update_validation debug: pacemaker-3.5-style configuration is also valid for pacemaker-3.6 -update_validation debug: Testing 'pacemaker-3.6' validation (20 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.6 -update_validation debug: pacemaker-3.6-style configuration is also valid for pacemaker-3.7 -update_validation debug: Testing 'pacemaker-3.7' validation (21 of X) -update_validation debug: Configuration valid 
for schema: pacemaker-3.7 -update_validation debug: pacemaker-3.7-style configuration is also valid for pacemaker-3.8 -update_validation debug: Testing 'pacemaker-3.8' validation (22 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.8 -update_validation debug: pacemaker-3.8-style configuration is also valid for pacemaker-3.9 -update_validation debug: Testing 'pacemaker-3.9' validation (23 of X) -update_validation debug: Configuration valid for schema: pacemaker-3.9 -update_validation trace: Stopping at pacemaker-3.9 -update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.9 +pcmk__update_schema debug: Schema pacemaker-1.2 does not validate +pcmk__update_schema debug: Schema pacemaker-1.3 validates +pcmk__update_schema debug: Schema pacemaker-2.0 validates +pcmk__update_schema debug: Schema pacemaker-2.1 validates +pcmk__update_schema debug: Schema pacemaker-2.2 validates +pcmk__update_schema debug: Schema pacemaker-2.3 validates +pcmk__update_schema debug: Schema pacemaker-2.4 validates +pcmk__update_schema debug: Schema pacemaker-2.5 validates +pcmk__update_schema debug: Schema pacemaker-2.6 validates +pcmk__update_schema debug: Schema pacemaker-2.7 validates +pcmk__update_schema debug: Schema pacemaker-2.8 validates +pcmk__update_schema debug: Schema pacemaker-2.9 validates +pcmk__update_schema debug: Schema pacemaker-2.10 validates +pcmk__update_schema debug: Schema pacemaker-3.0 validates +pcmk__update_schema debug: Schema pacemaker-3.1 validates +pcmk__update_schema debug: Schema pacemaker-3.2 validates +pcmk__update_schema debug: Schema pacemaker-3.3 validates +pcmk__update_schema debug: Schema pacemaker-3.4 validates +pcmk__update_schema debug: Schema pacemaker-3.5 validates +pcmk__update_schema debug: Schema pacemaker-3.6 validates +pcmk__update_schema debug: Schema pacemaker-3.7 validates +pcmk__update_schema debug: Schema pacemaker-3.8 validates +pcmk__update_schema debug: Schema pacemaker-3.9 validates +pcmk__update_schema debug: Schema pacemaker-3.10 validates +pcmk__update_schema info: Transformed the configuration schema to pacemaker-3.10 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity @@ -387,7 +255,7 @@ Revised Cluster Status: =#=#=#= End test: Make resulting CIB valid, although without validate-with attribute - OK (0) =#=#=#= * Passed: cibadmin - Make resulting CIB valid, although without validate-with attribute =#=#=#= Begin test: Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#= -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release) unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity @@ -460,6 +328,8 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order element rsc_order: Relax-NG validity error : Invalid 
attribute first-action for element rsc_order element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order +element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order +element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#= @@ -478,7 +348,9 @@ element rsc_order: Relax-NG validity error : Element constraints has extra conte =#=#=#= End test: Make resulting CIB invalid, and without validate-with attribute - OK (0) =#=#=#= * Passed: cibadmin - Make resulting CIB invalid, and without validate-with attribute =#=#=#= Begin test: Run crm_simulate with invalid CIB, also without validate-with attribute =#=#=#= -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release) +validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order +validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order diff --git a/cts/cli/tickets.xml b/cts/cli/tickets.xml new file mode 100644 index 0000000..7130abc --- /dev/null +++ b/cts/cli/tickets.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/cts-attrd.in b/cts/cts-attrd.in index b594ac3..c9a219d 100644 --- a/cts/cts-attrd.in +++ b/cts/cts-attrd.in @@ -196,7 +196,7 @@ class AttributeTests(Tests): test.add_cmd("attrd_updater", "--name DDD -U 555 --set=foo --output-as=xml") test.add_cmd_check_stdout("attrd_updater", "--name DDD -Q --output-as=xml", "name=\"DDD\" value=\"555\"") - test.add_log_pattern("Processed 1 private change for DDD, id=n/a, set=foo") + test.add_log_pattern("Processed 1 private change for DDD (set foo)") def build_multiple_query_tests(self): """ Add tests that set and query an attribute across multiple nodes """ diff --git a/cts/cts-cli.in b/cts/cts-cli.in index f4cb7c3..37dd530 100755 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,6 +1,6 @@ #!@BASH_PATH@ # -# Copyright 2008-2023 the Pacemaker project contributors +# Copyright 2008-2024 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -23,8 +23,8 @@ Options: --help Display this text, then exit -V, --verbose Display any differences from expected output -t 'TEST [...]' Run only specified tests - (default: 'daemons dates error_codes tools crm_mon acls - validity upgrade rules feature_set'). + (default: 'access_render daemons dates error_codes tools + crm_mon acls validity upgrade rules feature_set'). Other tests: agents (must be run in an installed environment). 
-p DIR Look for executables in DIR (may be specified multiple times) -v, --valgrind Run all commands under valgrind @@ -43,8 +43,8 @@ shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX) num_errors=0 num_passed=0 verbose=0 -tests="daemons dates error_codes tools crm_mon acls validity upgrade rules " -tests="$tests feature_set" +tests="access_render daemons dates error_codes tools crm_mon acls validity" +tests="$tests upgrade rules feature_set" do_save=0 XMLLINT_CMD= VALGRIND_CMD= @@ -550,7 +550,7 @@ function test_crm_mon() { desc="XML output of active unmanaged resource on offline node" cmd="crm_mon -1 --output-as=xml" - test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 desc="Brief text output of active unmanaged resource on offline node" cmd="crm_mon -1 --brief" @@ -812,12 +812,84 @@ function test_tools() { cmd="cibadmin -Q" test_assert $CRM_EX_OK + desc="List all available options (invalid type)" + cmd="crm_attribute --list-options=asdf" + test_assert $CRM_EX_USAGE 0 + + desc="List all available options (invalid type) (XML)" + cmd="crm_attribute --list-options=asdf --output-as=xml" + test_assert_validate $CRM_EX_USAGE 0 + + desc="List non-advanced cluster options" + cmd="crm_attribute --list-options=cluster" + test_assert $CRM_EX_OK 0 + + desc="List non-advanced cluster options (XML) (shows all)" + cmd="crm_attribute --list-options=cluster --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="List all available cluster options" + cmd="crm_attribute --list-options=cluster --all" + test_assert $CRM_EX_OK 0 + + desc="List all available cluster options (XML)" + cmd="crm_attribute --list-options=cluster --all --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + desc="Query the value of an attribute that does not exist" cmd="crm_attribute -n ABCD --query --quiet" test_assert $CRM_EX_NOSUCH 0 desc="Configure something before erasing" - cmd="crm_attribute -n cluster-delay -v 60s" + cmd="crm_attribute -n test_attr -v 5" + test_assert $CRM_EX_OK + + desc="Test '++' XML attribute update syntax" + cmd="cibadmin -M --score --xml-text=''" + test_assert $CRM_EX_OK + + desc="Test '+=' XML attribute update syntax" + cmd="cibadmin -M --score --xml-text=''" + test_assert $CRM_EX_OK + + desc="Test '++' nvpair value update syntax" + cmd="crm_attribute -n test_attr -v 'value++' --score" + test_assert $CRM_EX_OK + + desc="Test '++' nvpair value update syntax (XML)" + cmd="crm_attribute -n test_attr -v 'value++' --score --output-as=xml" + test_assert $CRM_EX_OK + + desc="Test '+=' nvpair value update syntax" + cmd="crm_attribute -n test_attr -v 'value+=2' --score" + test_assert $CRM_EX_OK + + desc="Test '+=' nvpair value update syntax (XML)" + cmd="crm_attribute -n test_attr -v 'value+=2' --score --output-as=xml" + test_assert $CRM_EX_OK + + desc="Test '++' XML attribute update syntax (--score not set)" + cmd="cibadmin -M --xml-text=''" + test_assert $CRM_EX_OK + + desc="Test '+=' XML attribute update syntax (--score not set)" + cmd="cibadmin -M --xml-text=''" + test_assert $CRM_EX_OK + + desc="Test '++' nvpair value update syntax (--score not set)" + cmd="crm_attribute -n test_attr -v 'value++'" + test_assert $CRM_EX_OK + + desc="Test '++' nvpair value update syntax (--score not set) (XML)" + cmd="crm_attribute -n test_attr -v 'value++' --output-as=xml" + test_assert $CRM_EX_OK + + desc="Test '+=' nvpair value update syntax (--score not set)" + cmd="crm_attribute -n test_attr -v 'value+=2'" + test_assert $CRM_EX_OK + + desc="Test '+=' nvpair value update syntax (--score not 
set) (XML)" + cmd="crm_attribute -n test_attr -v 'value+=2' --output-as=xml" test_assert $CRM_EX_OK desc="Require --force for CIB erasure" @@ -989,6 +1061,46 @@ function test_tools() { cmd="crm_resource foo bar" test_assert $CRM_EX_USAGE 0 + desc="List all available resource options (invalid type)" + cmd="crm_resource --list-options=asdf" + test_assert $CRM_EX_USAGE 0 + + desc="List all available resource options (invalid type) (XML)" + cmd="crm_resource --list-options=asdf --output-as=xml" + test_assert_validate $CRM_EX_USAGE 0 + + desc="List non-advanced primitive meta-attributes" + cmd="crm_resource --list-options=primitive" + test_assert $CRM_EX_OK 0 + + desc="List non-advanced primitive meta-attributes (XML) (shows all)" + cmd="crm_resource --list-options=primitive --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="List all available primitive meta-attributes" + cmd="crm_resource --list-options=primitive --all" + test_assert $CRM_EX_OK 0 + + desc="List all available primitive meta-attributes (XML)" + cmd="crm_resource --list-options=primitive --all --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="List non-advanced fencing parameters" + cmd="crm_resource --list-options=fencing" + test_assert $CRM_EX_OK 0 + + desc="List non-advanced fencing parameters (XML) (shows all)" + cmd="crm_resource --list-options=fencing --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="List all available fencing parameters" + cmd="crm_resource --list-options=fencing --all" + test_assert $CRM_EX_OK 0 + + desc="List all available fencing parameters (XML)" + cmd="crm_resource --list-options=fencing --all --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + desc="crm_resource given both -r and resource config" cmd="crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy" test_assert $CRM_EX_USAGE 0 @@ -1039,19 +1151,19 @@ function test_tools() { desc="Set a non-existent attribute for a resource element with output-as=xml" cmd="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml" - test_assert $CRM_EX_OK + test_assert_validate $CRM_EX_OK desc="Set an existent attribute for a resource element with output-as=xml" cmd="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml" - test_assert $CRM_EX_OK + test_assert_validate $CRM_EX_OK desc="Delete an existent attribute for a resource element with output-as=xml" cmd="crm_resource -r dummy -d description --element --output-as=xml" - test_assert $CRM_EX_OK + test_assert_validate $CRM_EX_OK desc="Delete a non-existent attribute for a resource element with output-as=xml" cmd="crm_resource -r dummy -d description --element --output-as=xml" - test_assert $CRM_EX_OK + test_assert_validate $CRM_EX_OK desc="Set a non-existent attribute for a resource element without output-as=xml" cmd="crm_resource -r dummy --set-parameter=description -v test_description --element" @@ -1095,7 +1207,7 @@ function test_tools() { desc="Show XML configuration of resource, output as XML" cmd="crm_resource -q -r dummy --output-as=xml" - test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 desc="Require a destination when migrating a resource that is stopped" cmd="crm_resource -r dummy -M" @@ -1137,10 +1249,30 @@ function test_tools() { cmd="crm_ticket -t ticketA -r --force" test_assert $CRM_EX_OK + desc="List ticket IDs" + cmd="crm_ticket -w" + test_assert $CRM_EX_OK 0 + + desc="List ticket IDs, outputting in XML" + cmd="crm_ticket -w --output-as=xml" + 
test_assert_validate $CRM_EX_OK 0 + + desc="Query ticket state" + cmd="crm_ticket -t ticketA -q" + test_assert $CRM_EX_OK 0 + + desc="Query ticket state, outputting as xml" + cmd="crm_ticket -t ticketA -q --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + desc="Query ticket granted state" cmd="crm_ticket -t ticketA -G granted" test_assert $CRM_EX_OK + desc="Query ticket granted state, outputting as xml" + cmd="crm_ticket -t ticketA -G granted --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + desc="Delete ticket granted state" cmd="crm_ticket -t ticketA -D granted --force" test_assert $CRM_EX_OK @@ -1157,10 +1289,54 @@ function test_tools() { cmd="crm_ticket -t ticketA -a" test_assert $CRM_EX_OK + desc="List ticket details" + cmd="crm_ticket -L -t ticketA" + test_assert $CRM_EX_OK 0 + + desc="List ticket details, outputting as XML" + cmd="crm_ticket -L -t ticketA --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Add a second ticket" + cmd="crm_ticket -t ticketB -G granted -d false" + test_assert $CRM_EX_OK + + desc="Set second ticket granted state" + cmd="crm_ticket -t ticketB -r --force" + test_assert $CRM_EX_OK + + desc="List tickets" + cmd="crm_ticket -l" + test_assert $CRM_EX_OK 0 + + desc="List tickets, outputting as XML" + cmd="crm_ticket -l --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Delete second ticket" + cmd="cibadmin --delete --xml-text ''" + test_assert $CRM_EX_OK + desc="Delete ticket standby state" cmd="crm_ticket -t ticketA -D standby" test_assert $CRM_EX_OK + desc="Add a constraint to a ticket" + cmd="cibadmin -C -o constraints --xml-text ''" + test_assert $CRM_EX_OK + + desc="Query ticket constraints" + cmd="crm_ticket -t ticketA -c" + test_assert $CRM_EX_OK 0 + + desc="Query ticket constraints, outputting as xml" + cmd="crm_ticket -t ticketA -c --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + desc="Delete ticket constraint" + cmd="cibadmin --delete --xml-text ''" + test_assert $CRM_EX_OK + desc="Ban a resource on unknown node" cmd="crm_resource -r dummy -B -N host1" test_assert $CRM_EX_NOSUCH @@ -1363,16 +1539,16 @@ function test_tools() { unset CIB_file desc="Set a meta-attribute for primitive and resources colocated with it" - cmd="crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive" - test_assert $CRM_EX_OK 0 + cmd="crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml" + test_assert_validate $CRM_EX_OK 0 desc="Set a meta-attribute for group and resource colocated with it" cmd="crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive" test_assert $CRM_EX_OK 0 desc="Set a meta-attribute for clone and resource colocated with it" - cmd="crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive" - test_assert $CRM_EX_OK 0 + cmd="crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml" + test_assert_validate $CRM_EX_OK 0 unset CIB_shadow unset CIB_shadow_dir @@ -1580,7 +1756,7 @@ function test_tools() { desc="Update a promotable score attribute to -INFINITY (XML)" cmd="crm_attribute -N cluster01 -p -v -INFINITY --output-as=xml" - test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 desc="Query after updating a promotable score attribute to -INFINITY" cmd="crm_attribute -N cluster01 -p -G" @@ -1588,7 +1764,7 @@ function test_tools() { desc="Query after updating a promotable score attribute to -INFINITY (XML)" cmd="crm_attribute -N cluster01 -p -G --output-as=xml" -
test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 desc="Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string" cmd="crm_attribute -N cluster01 -p '' -G" @@ -2293,20 +2469,32 @@ function test_tools() { CIB_file_invalid_1="$test_home/cli/crm_verify_invalid_bz.xml" CIB_file_invalid_2="$test_home/cli/crm_verify_invalid_no_stonith.xml" - desc="Verify a file-specified invalid configuration, outputting as xml" + desc="Verify a file-specified invalid configuration (text output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_1'" + test_assert $CRM_EX_CONFIG 0 + + desc="Verify a file-specified invalid configuration (verbose text output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --verbose" + test_assert $CRM_EX_CONFIG 0 + + desc="Verify a file-specified invalid configuration (quiet text output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --quiet" + test_assert $CRM_EX_CONFIG 0 + + desc="Verify a file-specified invalid configuration (XML output)" cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml" test_assert_validate $CRM_EX_CONFIG 0 - desc="Verify another file-specified invalid configuration, outputting as xml" - cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml" + desc="Verify a file-specified invalid configuration (verbose XML output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml --verbose" test_assert_validate $CRM_EX_CONFIG 0 - desc="Verbosely verify a file-specified invalid configuration, outputting as xml" - cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml --verbose" + desc="Verify a file-specified invalid configuration (quiet XML output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml --quiet" test_assert_validate $CRM_EX_CONFIG 0 - desc="Verbosely verify another file-specified invalid configuration, outputting as xml" - cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml --verbose" + desc="Verify another file-specified invalid configuration (XML output)" + cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml" test_assert_validate $CRM_EX_CONFIG 0 export CIB_file="$test_home/cli/crm_mon.xml" @@ -2815,7 +3003,7 @@ function test_validity() { create_shadow_cib --create-empty pacemaker-1.2 orig_trace_fns="$PCMK_trace_functions" - export PCMK_trace_functions=apply_upgrade,update_validation + export PCMK_trace_functions=apply_upgrade,pcmk__update_schema cibadmin -C -o resources --xml-text '' cibadmin -C -o resources --xml-text '' @@ -2888,7 +3076,7 @@ test_upgrade() { create_shadow_cib --create-empty pacemaker-2.10 orig_trace_fns="$PCMK_trace_functions" - export PCMK_trace_functions=apply_upgrade,update_validation + export PCMK_trace_functions=apply_upgrade,pcmk__update_schema desc="Set stonith-enabled=false" cmd="crm_attribute -n stonith-enabled -v false" @@ -3167,25 +3355,23 @@ EOF # Ensure all command output is in portable locale for comparison export LC_ALL="C" test_access_render() { - local TMPXML + local TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.access_render.xml.XXXXXXXXXX) - TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.access_render.xml.XXXXXXXXXX) - export CIB_shadow_dir="${shadow_dir}" - - $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1 - export CIB_shadow=$shadow + create_shadow_cib --create-empty # Create a test CIB that has ACL roles cat < "$TMPXML" - - - - - - - - - + + + + + + + + + + EOF desc="Configure some ACLs" @@ -3198,7 +3384,7 @@ EOF unset CIB_user - # Run cibadmin --show-access on the test CIB with different 
users (tony here) + # Run cibadmin --show-access on the test CIB as an ACL-restricted user desc="An instance of ACLs render (into color)" cmd="cibadmin --force --show-access=color -Q --user tony" @@ -3230,7 +3416,7 @@ function test_feature_set() { desc="XML output, no mixed status" cmd="crm_mon --output-as=xml" - test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 # Modify the CIB to fake that the cluster has mixed versions desc="Fake inconsistent feature set" @@ -3243,7 +3429,7 @@ function test_feature_set() { desc="XML output, mixed status" cmd="crm_mon --output-as=xml" - test_assert $CRM_EX_OK 0 + test_assert_validate $CRM_EX_OK 0 unset CIB_shadow_dir } @@ -3287,6 +3473,7 @@ done for t in $tests; do case "$t" in + access_render) ;; agents) ;; daemons) ;; dates) ;; @@ -3341,6 +3528,16 @@ for t in $tests; do eval TMPFILE_$t="$TMPFILE" test_$t > "$TMPFILE" + # @TODO Add a way to suppress this message within cibadmin, and then drop + # the handling here. + suppress="The supplied command can provide skewed result since it is run" + suppress="$suppress under user that also gets guarded per ACLs on their" + suppress="$suppress own right. Continuing since --force flag was provided." + + # This giant sed replaces content expected to change for each run + # (timestamps, source file line numbers, etc.), build (configure options, + # version numbers, etc.), or platform (system messages, etc.). + # # last-rc-change= is always numeric in the CIB. However, for the crm_mon # test we also need to compare against the XML output of the crm_mon # program. There, these are shown as human readable strings (like the @@ -3354,10 +3551,15 @@ for t in $tests; do -e 's/last_change time=\".*\"/last_change time=\"\"/' \ -e 's/ api-version="[^"]*"/ api-version="X"/' \ -e 's/ default="[^"]*"/ default=""/' \ + -e 's/\(\* Possible values.*: .*\)(default: [^)]*)/\1(default: )/g' \ -e 's/ version="[^"]*"/ version=""/' \ -e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \ -e 's/crm_feature_set="[^"]*" //'\ + -e 's/@crm_feature_set=[0-9.]*, //'\ + -e 's/\( "${TMPFILE}.$$" mv -- "${TMPFILE}.$$" "$TMPFILE" diff --git a/cts/cts-fencing.in b/cts/cts-fencing.in index c2ed29a..6785144 100644 --- a/cts/cts-fencing.in +++ b/cts/cts-fencing.in @@ -133,7 +133,7 @@ class FenceTests(Tests): '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4"') test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4 = 10 - test.add_log_pattern("Total timeout set to 12") + test.add_log_pattern("Total timeout set to 12s") # custom timeout _WITH_ topology test = self.new_test("cpg_custom_timeout_2", @@ -141,15 +141,15 @@ class FenceTests(Tests): test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', - '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') + '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1000ms"') test.add_cmd('stonith_admin', - '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4000"') + '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4000s"') test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd("stonith_admin", "--output-as=xml 
-r node3 -i 3 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4000 = 4006 - test.add_log_pattern("Total timeout set to 4807") + test.add_log_pattern("Total timeout set to 4807s") def build_fence_merge_tests(self): """ Register tests to verify when fence operations should be merged """ @@ -262,7 +262,7 @@ class FenceTests(Tests): "--output-as=xml -R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") if test_type["use_cpg"]: test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -F node3 -t 2", ExitStatus.TIMEOUT) - test.add_log_pattern("Total timeout set to 7") + test.add_log_pattern("Total timeout set to 7s") else: test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -F node3 -t 2", ExitStatus.ERROR) @@ -284,7 +284,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") if test_type["use_cpg"]: - test.add_log_pattern("Total timeout set to 18") + test.add_log_pattern("Total timeout set to 18s") # test what happens when we try to use a missing fence-agent. for test_type in test_types: @@ -313,7 +313,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") - test.add_log_pattern("Total timeout set to 6") + test.add_log_pattern("Total timeout set to 6s") test.add_log_pattern("targeting node3 using true returned 0") @@ -331,7 +331,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -d node3 -i 1") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") - test.add_log_pattern("Total timeout set to 6") + test.add_log_pattern("Total timeout set to 6s") test.add_log_pattern("targeting node3 using true returned 0") # test what happens when the first fencing level has multiple devices. @@ -350,7 +350,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") - test.add_log_pattern("Total timeout set to 48") + test.add_log_pattern("Total timeout set to 48s") test.add_log_pattern("targeting node3 using false returned 1") test.add_log_pattern("targeting node3 using true returned 0") @@ -384,7 +384,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 3") - test.add_log_pattern("Total timeout set to 21") + test.add_log_pattern("Total timeout set to 21s") test.add_log_pattern("targeting node3 using false1 returned 1") test.add_log_pattern("targeting node3 using false2 returned 1") test.add_log_pattern("targeting node3 using true3 returned 0") @@ -451,7 +451,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") - test.add_log_pattern("Total timeout set to 96") + test.add_log_pattern("Total timeout set to 96s") test.add_log_pattern("targeting node3 using false1 returned 1") test.add_log_pattern("targeting node3 using false2 returned ", negative=True) @@ -659,7 +659,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -F node3 --delay 1") # Total fencing timeout takes all fencing delays into account. - test.add_log_pattern("Total timeout set to 582") + test.add_log_pattern("Total timeout set to 582s") # Fencing timeout for the first device takes the requested fencing delay into account. # Fencing timeout also takes pcmk_delay_base into account. 
@@ -869,7 +869,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) - test.add_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") + test.add_log_pattern("Total timeout set to 3s for peer's fencing targeting node_fake") test.add_log_pattern("perform 'off' action targeting node_fake using true1") test.add_log_pattern("perform 'off' action targeting node_fake using true2") test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") @@ -895,7 +895,7 @@ class FenceTests(Tests): test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) - test.add_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") + test.add_log_pattern("Total timeout set to 3s for peer's fencing targeting node_fake") test.add_log_pattern("perform 'off' action targeting node_fake using true1") test.add_log_pattern("perform 'off' action targeting node_fake using true2") test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") diff --git a/cts/cts-log-watcher.in b/cts/cts-log-watcher.in index cee9c94..a12ecfa 100644 --- a/cts/cts-log-watcher.in +++ b/cts/cts-log-watcher.in @@ -43,9 +43,6 @@ if __name__ == '__main__': skipthis=1 prefix = args[i+1] - elif args[i] == '-t' or args[i] == '--tag': - skipthis=1 - if not os.access(filename, os.R_OK): print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)) sys.exit(1) diff --git a/cts/scheduler/exp/utilization-order4.exp b/cts/scheduler/exp/utilization-order4.exp index 4a3d380..996eb1b 100644 --- a/cts/scheduler/exp/utilization-order4.exp +++ b/cts/scheduler/exp/utilization-order4.exp @@ -16,7 +16,7 @@ - + diff --git a/cts/scheduler/stderr/order-wrong-kind.stderr b/cts/scheduler/stderr/order-wrong-kind.stderr index 7090368..db35666 100644 --- a/cts/scheduler/stderr/order-wrong-kind.stderr +++ b/cts/scheduler/stderr/order-wrong-kind.stderr @@ -1 +1 @@ -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release) \ No newline at end of file diff --git a/cts/scheduler/summary/order-wrong-kind.summary b/cts/scheduler/summary/order-wrong-kind.summary index 0e00bdf..903a25c 100644 --- a/cts/scheduler/summary/order-wrong-kind.summary +++ b/cts/scheduler/summary/order-wrong-kind.summary @@ -1,4 +1,4 @@ -Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations) +Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release) Current cluster status: * Node List: * Online: [ node1 ] diff --git a/cts/valgrind-pcmk.suppressions b/cts/valgrind-pcmk.suppressions index a05b9db..461edc2 100644 --- a/cts/valgrind-pcmk.suppressions +++ b/cts/valgrind-pcmk.suppressions @@ -6,14 +6,6 @@ fun:crm_get_msec } -{ - Ignore option parsing - Memcheck:Leak - fun:realloc - fun:crm_get_option_long - fun:main -} - { dlopen internals Memcheck:Leak diff --git a/daemons/attrd/attrd_alerts.c 
b/daemons/attrd/attrd_alerts.c index 495e18f..4e97743 100644 --- a/daemons/attrd/attrd_alerts.c +++ b/daemons/attrd/attrd_alerts.c @@ -1,5 +1,5 @@ /* - * Copyright 2015-2023 the Pacemaker project contributors + * Copyright 2015-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,11 +10,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include "pacemaker-attrd.h" @@ -48,7 +48,7 @@ attrd_lrmd_connect(void) int ret = -ENOTCONN; for (int fails = 0; fails < max_attempts; ++fails) { - ret = the_lrmd->cmds->connect(the_lrmd, T_ATTRD, NULL); + ret = the_lrmd->cmds->connect(the_lrmd, PCMK__VALUE_ATTRD, NULL); if (ret == pcmk_ok) { break; } @@ -93,11 +93,11 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void } crmalerts = output; - if ((crmalerts != NULL) && !pcmk__xe_is(crmalerts, XML_CIB_TAG_ALERTS)) { - crmalerts = first_named_child(crmalerts, XML_CIB_TAG_ALERTS); + if ((crmalerts != NULL) && !pcmk__xe_is(crmalerts, PCMK_XE_ALERTS)) { + crmalerts = pcmk__xe_first_child(crmalerts, PCMK_XE_ALERTS, NULL, NULL); } if (!crmalerts) { - crm_notice("CIB query result has no " XML_CIB_TAG_ALERTS " section"); + crm_notice("CIB query result has no " PCMK_XE_ALERTS " section"); return; } @@ -113,7 +113,7 @@ attrd_read_options(gpointer user_data) CRM_CHECK(the_cib != NULL, return TRUE); call_id = the_cib->cmds->query(the_cib, - pcmk__cib_abs_xpath_for(XML_CIB_TAG_ALERTS), + pcmk__cib_abs_xpath_for(PCMK_XE_ALERTS), NULL, cib_xpath|cib_scope_local); the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, NULL, diff --git a/daemons/attrd/attrd_attributes.c b/daemons/attrd/attrd_attributes.c index 388c181..974de89 100644 --- a/daemons/attrd/attrd_attributes.c +++ b/daemons/attrd/attrd_attributes.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2022 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -26,55 +25,56 @@ static attribute_t * attrd_create_attribute(xmlNode *xml) { int is_private = 0; - int dampen = 0; + long long dampen = 0; const char *name = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *set_type = crm_element_value(xml, PCMK__XA_ATTR_SET_TYPE); const char *dampen_s = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING); attribute_t *a = NULL; if (set_type == NULL) { - set_type = XML_TAG_ATTR_SETS; + set_type = PCMK_XE_INSTANCE_ATTRIBUTES; } /* Set type is meaningful only when writing to the CIB. Private * attributes are not written. 
*/ crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &is_private); - if ((is_private != 0) - && !pcmk__str_any_of(set_type, XML_TAG_ATTR_SETS, XML_TAG_UTILIZATION, - NULL)) { + if (!is_private && !pcmk__str_any_of(set_type, + PCMK_XE_INSTANCE_ATTRIBUTES, + PCMK_XE_UTILIZATION, NULL)) { crm_warn("Ignoring attribute %s with invalid set type %s", pcmk__s(name, "(unidentified)"), set_type); return NULL; } - a = calloc(1, sizeof(attribute_t)); - CRM_ASSERT(a != NULL); - - a->is_private = is_private; - pcmk__str_update(&a->id, name); - pcmk__str_update(&a->set_type, set_type); + a = pcmk__assert_alloc(1, sizeof(attribute_t)); + a->id = pcmk__str_copy(name); + a->set_type = pcmk__str_copy(set_type); a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET); - a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID); + a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER); a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value); - a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER); - crm_trace("Performing all %s operations as user '%s'", a->id, a->user); + if (is_private) { + attrd_set_attr_flags(a, attrd_attr_is_private); + } if (dampen_s != NULL) { dampen = crm_get_msec(dampen_s); } - crm_trace("Created attribute %s with %s write delay", a->id, - (a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms)); - if(dampen > 0) { - a->timeout_ms = dampen; + if (dampen > 0) { + a->timeout_ms = (int) QB_MIN(dampen, INT_MAX); a->timer = attrd_add_timer(a->id, a->timeout_ms, a); } else if (dampen < 0) { crm_warn("Ignoring invalid delay %s for attribute %s", dampen_s, a->id); } + crm_trace("Created attribute %s with %s write delay and %s CIB user", + a->id, + ((dampen > 0)? pcmk__readable_interval(a->timeout_ms) : "no"), + pcmk__s(a->user, "default")); + g_hash_table_replace(attributes, a->id, a); return a; } @@ -83,7 +83,7 @@ static int attrd_update_dampening(attribute_t *a, xmlNode *xml, const char *attr) { const char *dvalue = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING); - int dampen = 0; + long long dampen = 0; if (dvalue == NULL) { crm_warn("Could not update %s: peer did not specify value for delay", @@ -100,7 +100,7 @@ attrd_update_dampening(attribute_t *a, xmlNode *xml, const char *attr) if (a->timeout_ms != dampen) { mainloop_timer_del(a->timer); - a->timeout_ms = dampen; + a->timeout_ms = (int) QB_MIN(dampen, INT_MAX); if (dampen > 0) { a->timer = attrd_add_timer(attr, a->timeout_ms, a); crm_info("Update attribute %s delay to %dms (%s)", @@ -136,20 +136,21 @@ xmlNode * attrd_add_value_xml(xmlNode *parent, const attribute_t *a, const attribute_value_t *v, bool force_write) { - xmlNode *xml = create_xml_node(parent, __func__); + xmlNode *xml = pcmk__xe_create(parent, __func__); crm_xml_add(xml, PCMK__XA_ATTR_NAME, a->id); + crm_xml_add(xml, PCMK__XA_ATTR_SET_TYPE, a->set_type); crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set_id); - crm_xml_add(xml, PCMK__XA_ATTR_UUID, a->uuid); crm_xml_add(xml, PCMK__XA_ATTR_USER, a->user); pcmk__xe_add_node(xml, v->nodename, v->nodeid); - if (v->is_remote != 0) { + if (pcmk_is_set(v->flags, attrd_value_remote)) { crm_xml_add_int(xml, PCMK__XA_ATTR_IS_REMOTE, 1); } crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current); crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000); - crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private); - crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, force_write); + crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, + pcmk_is_set(a->flags, attrd_attr_is_private)); + crm_xml_add_int(xml, 
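A quick note on the dampening change above: crm_get_msec() returns a long long, so both attrd_create_attribute() and attrd_update_dampening() now clamp before assigning to the int timeout_ms field. The shared pattern, reduced to its core with the names used in the hunks:

    long long dampen = crm_get_msec(dampen_s);   /* negative on parse error */

    if (dampen > 0) {
        a->timeout_ms = (int) QB_MIN(dampen, INT_MAX);  /* avoid int overflow */
        a->timer = attrd_add_timer(a->id, a->timeout_ms, a);
    } else if (dampen < 0) {
        crm_warn("Ignoring invalid delay %s for attribute %s", dampen_s, a->id);
    }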
PCMK__XA_ATTRD_IS_FORCE_WRITE, force_write); return xml; } @@ -166,8 +167,7 @@ attrd_clear_value_seen(void) while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { - v->seen = FALSE; - crm_trace("Clear seen flag %s[%s] = %s.", a->id, v->nodename, v->current); + attrd_clear_value_flags(v, attrd_value_from_peer); } } } @@ -178,9 +178,9 @@ attrd_populate_attribute(xmlNode *xml, const char *attr) attribute_t *a = NULL; bool update_both = false; - const char *op = crm_element_value(xml, PCMK__XA_TASK); + const char *op = crm_element_value(xml, PCMK_XA_TASK); - // NULL because PCMK__ATTRD_CMD_SYNC_RESPONSE has no PCMK__XA_TASK + // NULL because PCMK__ATTRD_CMD_SYNC_RESPONSE has no PCMK_XA_TASK update_both = pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_BOTH, pcmk__str_null_matches); @@ -210,3 +210,66 @@ attrd_populate_attribute(xmlNode *xml, const char *attr) return a; } + +/*! + * \internal + * \brief Get the XML ID used to write out an attribute set + * + * \param[in] attr Attribute to get set ID for + * \param[in] node_state_id XML ID of node state that attribute value is for + * + * \return Newly allocated string with XML ID to use for \p attr set + */ +char * +attrd_set_id(const attribute_t *attr, const char *node_state_id) +{ + char *set_id = NULL; + + CRM_ASSERT((attr != NULL) && (node_state_id != NULL)); + + if (attr->set_id == NULL) { + /* @COMPAT This should really take the set type into account. Currently + * we use the same XML ID for transient attributes and utilization + * attributes. It doesn't cause problems because the status section is + * not limited by the schema in any way, but it's still unfortunate. + * For backward compatibility reasons, we can't change this. + */ + set_id = crm_strdup_printf("%s-%s", PCMK_XE_STATUS, node_state_id); + } else { + /* @COMPAT When the user specifies a set ID for an attribute, it is the + * same for every node. That is less than ideal, but again, the schema + * doesn't enforce anything for the status section. We couldn't change + * it without allowing the set ID to vary per value rather than per + * attribute, which would break backward compatibility, pose design + * challenges, and potentially cause problems in rolling upgrades. + */ + set_id = pcmk__str_copy(attr->set_id); + } + crm_xml_sanitize_id(set_id); + return set_id; +} + +/*! + * \internal + * \brief Get the XML ID used to write out an attribute value + * + * \param[in] attr Attribute to get value XML ID for + * \param[in] node_state_id UUID of node that attribute value is for + * + * \return Newly allocated string with XML ID of \p attr value + */ +char * +attrd_nvpair_id(const attribute_t *attr, const char *node_state_id) +{ + char *nvpair_id = NULL; + + if (attr->set_id != NULL) { + nvpair_id = crm_strdup_printf("%s-%s", attr->set_id, attr->id); + + } else { + nvpair_id = crm_strdup_printf(PCMK_XE_STATUS "-%s-%s", + node_state_id, attr->id); + } + crm_xml_sanitize_id(nvpair_id); + return nvpair_id; +} diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c index 80e5580..2537ade 100644 --- a/daemons/attrd/attrd_cib.c +++ b/daemons/attrd/attrd_cib.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
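The two ID helpers introduced above are easiest to follow with concrete values. Assuming a hypothetical attribute named "pingd" and a node_state ID of "1":

    /* attr->set_id == NULL (the common case): */
    char *set_id = attrd_set_id(attr, "1");        /* -> "status-1" */
    char *nvpair_id = attrd_nvpair_id(attr, "1");  /* -> "status-1-pingd" */

    /* With attr->set_id == "my-set", the same calls return "my-set" and
     * "my-set-pingd"; every result passes through crm_xml_sanitize_id().
     */
    free(set_id);
    free(nvpair_id);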
* @@ -15,11 +15,12 @@ #include #include -#include +#include // cib__* #include #include #include #include +#include // pcmk__get_node() #include "pacemaker-attrd.h" @@ -50,8 +51,10 @@ attrd_cib_updated_cb(const char *event, xmlNode *msg) { const xmlNode *patchset = NULL; const char *client_name = NULL; + bool status_changed = false; if (attrd_shutting_down(true)) { + crm_debug("Ignoring CIB change during shutdown"); return; } @@ -59,29 +62,32 @@ attrd_cib_updated_cb(const char *event, xmlNode *msg) return; } - if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) { + if (cib__element_in_patchset(patchset, PCMK_XE_ALERTS)) { mainloop_set_trigger(attrd_config_read); } - if (!attrd_election_won()) { - // Don't write attributes if we're not the writer - return; - } + status_changed = cib__element_in_patchset(patchset, PCMK_XE_STATUS); - client_name = crm_element_value(msg, F_CIB_CLIENTNAME); + client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME); if (!cib__client_triggers_refresh(client_name)) { - // The CIB is still accurate + /* This change came from a source that ensured the CIB is consistent + * with our attributes table, so we don't need to write anything out. + */ return; } - if (cib__element_in_patchset(patchset, XML_CIB_TAG_NODES) - || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) { + if (!attrd_election_won()) { + // Don't write attributes if we're not the writer + return; + } - /* An unsafe client modified the nodes or status section. Write - * transient attributes to ensure they're up-to-date in the CIB. + if (status_changed || cib__element_in_patchset(patchset, PCMK_XE_NODES)) { + /* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS + * section. Write transient attributes to ensure they're up-to-date in + * the CIB. */ if (client_name == NULL) { - client_name = crm_element_value(msg, F_CIB_CLIENTID); + client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTID); } crm_notice("Updating all attributes after %s event triggered by %s", event, pcmk__s(client_name, "(unidentified client)")); @@ -108,7 +114,7 @@ attrd_cib_connect(int max_retry) } attempts++; crm_debug("Connection attempt %d to the CIB manager", attempts); - rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command); + rc = the_cib->cmds->signon(the_cib, PCMK__VALUE_ATTRD, cib_command); } while ((rc != pcmk_ok) && (attempts < max_retry)); @@ -126,7 +132,8 @@ attrd_cib_connect(int max_retry) goto cleanup; } - rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, + rc = the_cib->cmds->add_notify_callback(the_cib, + PCMK__VALUE_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); if (rc != pcmk_ok) { crm_err("Could not set CIB notification callback"); @@ -144,7 +151,7 @@ void attrd_cib_disconnect(void) { CRM_CHECK(the_cib != NULL, return); - the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, + the_cib->cmds->del_notify_callback(the_cib, PCMK__VALUE_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); cib__clean_up_connection(&the_cib); } @@ -153,39 +160,44 @@ static void attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { - do_crm_log_unlikely(((rc != pcmk_ok)? 
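The reordering in attrd_cib_updated_cb() above is the interesting part: the "did a safe client make this change?" test now happens before the writer check, so non-writers bail out only when a write might actually be needed. Condensed control flow; this is not the verbatim function, and the final write-out step is paraphrased:

    if (cib__element_in_patchset(patchset, PCMK_XE_ALERTS)) {
        mainloop_set_trigger(attrd_config_read);  /* re-read alert config */
    }
    if (!cib__client_triggers_refresh(client_name)) {
        return;  /* safe client: CIB already matches the attribute table */
    }
    if (!attrd_election_won()) {
        return;  /* only the writer rewrites attributes */
    }
    if (status_changed || cib__element_in_patchset(patchset, PCMK_XE_NODES)) {
        /* unsafe change to nodes/status: write all transient attributes */
    }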
LOG_NOTICE : LOG_DEBUG), - "Cleared transient attributes: %s " - CRM_XS " xpath=%s rc=%d", - pcmk_strerror(rc), (char *) user_data, rc); + const char *node = pcmk__s((const char *) user_data, "a node"); + + if (rc == pcmk_ok) { + crm_info("Cleared transient node attributes for %s from CIB", node); + } else { + crm_err("Unable to clear transient node attributes for %s from CIB: %s", + node, pcmk_strerror(rc)); + } } -#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS +#define XPATH_TRANSIENT "//" PCMK__XE_NODE_STATE \ + "[@" PCMK_XA_UNAME "='%s']" \ + "/" PCMK__XE_TRANSIENT_ATTRIBUTES /*! * \internal - * \brief Wipe all transient attributes for this node from the CIB - * - * Clear any previous transient node attributes from the CIB. This is - * normally done by the DC's controller when this node leaves the cluster, but - * this handles the case where the node restarted so quickly that the - * cluster layer didn't notice. + * \brief Wipe all transient node attributes for a node from the CIB * - * \todo If pacemaker-attrd respawns after crashing (see PCMK_ENV_RESPAWNED), - * ideally we'd skip this and sync our attributes from the writer. - * However, currently we reject any values for us that the writer has, in - * attrd_peer_update(). + * \param[in] node Node to clear attributes for */ -static void -attrd_erase_attrs(void) +void +attrd_cib_erase_transient_attrs(const char *node) { int call_id = 0; - char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname); + char *xpath = NULL; + + CRM_CHECK(node != NULL, return); + + xpath = crm_strdup_printf(XPATH_TRANSIENT, node); - crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s", - xpath); + crm_debug("Clearing transient node attributes for %s from CIB using %s", + node, xpath); call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_xpath); - the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath, + free(xpath); + + the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, + pcmk__str_copy(node), "attrd_erase_cb", attrd_erase_cb, free); } @@ -197,8 +209,17 @@ attrd_erase_attrs(void) void attrd_cib_init(void) { - // We have no attribute values in memory, wipe the CIB to match - attrd_erase_attrs(); + /* We have no attribute values in memory, so wipe the CIB to match. This is + * normally done by the DC's controller when this node leaves the cluster, but + * this handles the case where the node restarted so quickly that the + * cluster layer didn't notice. + * + * \todo If pacemaker-attrd respawns after crashing (see PCMK_ENV_RESPAWNED), + * ideally we'd skip this and sync our attributes from the writer. + * However, currently we reject any values for us that the writer has, in + * attrd_peer_update(). 
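For reference, the XPATH_TRANSIENT macro above expands as follows for a hypothetical node "node1":

    /*   //node_state[@uname='node1']/transient_attributes   */
    char *xpath = crm_strdup_printf(XPATH_TRANSIENT, "node1");

    /* With cib_xpath, the_cib->cmds->remove() deletes that node's whole
     * transient_attributes block in a single CIB request.
     */
    free(xpath);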
+ */ + attrd_cib_erase_transient_attrs(attrd_cluster->uname); // Set a trigger for reading the CIB (for the alerts section) attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL); @@ -262,19 +283,24 @@ attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *use g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) { - do_crm_log(level, "* %s[%s]=%s", a->id, peer, v->requested); - free(v->requested); - v->requested = NULL; - if (rc != pcmk_ok) { - a->changed = true; /* Attempt write out again */ + if (rc == pcmk_ok) { + crm_info("* Wrote %s[%s]=%s", + a->id, peer, pcmk__s(v->requested, "(unset)")); + pcmk__str_update(&(v->requested), NULL); + } else { + do_crm_log(level, "* Could not write %s[%s]=%s", + a->id, peer, pcmk__s(v->requested, "(unset)")); + /* Reattempt write below if we are still the writer */ + attrd_set_attr_flags(a, attrd_attr_changed); } } - if (a->changed && attrd_election_won()) { + if (pcmk_is_set(a->flags, attrd_attr_changed) && attrd_election_won()) { if (rc == pcmk_ok) { /* We deferred a write of a new update because this update was in * progress. Write out the new value without additional delay. */ + crm_debug("Pending update for %s can be written now", a->id); write_attribute(a, false); /* We're re-attempting a write because the original failed; delay @@ -320,40 +346,27 @@ static int add_set_attr_update(const attribute_t *attr, const char *attr_id, const char *node_id, const char *set_id, const char *value) { - xmlNode *update = create_xml_node(NULL, XML_CIB_TAG_STATE); + xmlNode *update = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); xmlNode *child = update; int rc = ENOMEM; - if (child == NULL) { - goto done; - } - crm_xml_add(child, XML_ATTR_ID, node_id); + crm_xml_add(child, PCMK_XA_ID, node_id); - child = create_xml_node(child, XML_TAG_TRANSIENT_NODEATTRS); - if (child == NULL) { - goto done; - } - crm_xml_add(child, XML_ATTR_ID, node_id); + child = pcmk__xe_create(child, PCMK__XE_TRANSIENT_ATTRIBUTES); + crm_xml_add(child, PCMK_XA_ID, node_id); - child = create_xml_node(child, attr->set_type); - if (child == NULL) { - goto done; - } - crm_xml_add(child, XML_ATTR_ID, set_id); + child = pcmk__xe_create(child, attr->set_type); + crm_xml_add(child, PCMK_XA_ID, set_id); - child = create_xml_node(child, XML_CIB_TAG_NVPAIR); - if (child == NULL) { - goto done; - } - crm_xml_add(child, XML_ATTR_ID, attr_id); - crm_xml_add(child, XML_NVPAIR_ATTR_NAME, attr->id); - crm_xml_add(child, XML_NVPAIR_ATTR_VALUE, value); + child = pcmk__xe_create(child, PCMK_XE_NVPAIR); + crm_xml_add(child, PCMK_XA_ID, attr_id); + crm_xml_add(child, PCMK_XA_NAME, attr->id); + crm_xml_add(child, PCMK_XA_VALUE, value); - rc = the_cib->cmds->modify(the_cib, XML_CIB_TAG_STATUS, update, + rc = the_cib->cmds->modify(the_cib, PCMK_XE_STATUS, update, cib_can_create|cib_transaction); rc = pcmk_legacy2rc(rc); -done: free_xml(update); return rc; } @@ -373,16 +386,16 @@ static int add_unset_attr_update(const attribute_t *attr, const char *attr_id, const char *node_id, const char *set_id) { - char *xpath = crm_strdup_printf("/" XML_TAG_CIB - "/" XML_CIB_TAG_STATUS - "/" XML_CIB_TAG_STATE - "[@" XML_ATTR_ID "='%s']" - "/" XML_TAG_TRANSIENT_NODEATTRS - "[@" XML_ATTR_ID "='%s']" - "/%s[@" XML_ATTR_ID "='%s']" - "/" XML_CIB_TAG_NVPAIR - "[@" XML_ATTR_ID "='%s' " - "and @" XML_NVPAIR_ATTR_NAME "='%s']", + char *xpath = crm_strdup_printf("/" PCMK_XE_CIB + "/" PCMK_XE_STATUS + "/" PCMK__XE_NODE_STATE + 
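add_set_attr_update() above builds a nested skeleton that the CIB manager merges via modify() with cib_can_create|cib_transaction. For a hypothetical attribute "pingd" = "100" on node ID "1" with the default set type, the XML it produces looks like:

    /*
     *   <node_state id="1">
     *     <transient_attributes id="1">
     *       <instance_attributes id="status-1">
     *         <nvpair id="status-1-pingd" name="pingd" value="100"/>
     *       </instance_attributes>
     *     </transient_attributes>
     *   </node_state>
     */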
"[@" PCMK_XA_ID "='%s']" + "/" PCMK__XE_TRANSIENT_ATTRIBUTES + "[@" PCMK_XA_ID "='%s']" + "/%s[@" PCMK_XA_ID "='%s']" + "/" PCMK_XE_NVPAIR + "[@" PCMK_XA_ID "='%s' " + "and @" PCMK_XA_NAME "='%s']", node_id, node_id, attr->set_type, set_id, attr_id, attr->id); @@ -406,31 +419,17 @@ add_unset_attr_update(const attribute_t *attr, const char *attr_id, static int add_attr_update(const attribute_t *attr, const char *value, const char *node_id) { - char *set_id = NULL; - char *attr_id = NULL; + char *set_id = attrd_set_id(attr, node_id); + char *nvpair_id = attrd_nvpair_id(attr, node_id); int rc = pcmk_rc_ok; - if (attr->set_id != NULL) { - pcmk__str_update(&set_id, attr->set_id); - } else { - set_id = crm_strdup_printf("%s-%s", XML_CIB_TAG_STATUS, node_id); - } - crm_xml_sanitize_id(set_id); - - if (attr->uuid != NULL) { - pcmk__str_update(&attr_id, attr->uuid); - } else { - attr_id = crm_strdup_printf("%s-%s", set_id, attr->id); - } - crm_xml_sanitize_id(attr_id); - - if (value != NULL) { - rc = add_set_attr_update(attr, attr_id, node_id, set_id, value); + if (value == NULL) { + rc = add_unset_attr_update(attr, nvpair_id, node_id, set_id); } else { - rc = add_unset_attr_update(attr, attr_id, node_id, set_id); + rc = add_set_attr_update(attr, nvpair_id, node_id, set_id, value); } free(set_id); - free(attr_id); + free(nvpair_id); return rc; } @@ -454,13 +453,11 @@ send_alert_attributes_value(attribute_t *a, GHashTable *t) static void set_alert_attribute_value(GHashTable *t, attribute_value_t *v) { - attribute_value_t *a_v = NULL; - a_v = calloc(1, sizeof(attribute_value_t)); - CRM_ASSERT(a_v != NULL); + attribute_value_t *a_v = pcmk__assert_alloc(1, sizeof(attribute_value_t)); a_v->nodeid = v->nodeid; - a_v->nodename = strdup(v->nodename); - pcmk__str_update(&a_v->current, v->current); + a_v->nodename = pcmk__str_copy(v->nodename); + a_v->current = pcmk__str_copy(v->current); g_hash_table_replace(t, a_v->nodename, a_v); } @@ -493,7 +490,7 @@ write_attribute(attribute_t *a, bool ignore_delay) } /* If this attribute will be written to the CIB ... */ - if (!stand_alone && !a->is_private) { + if (!stand_alone && !pcmk_is_set(a->flags, attrd_attr_is_private)) { /* Defer the write if now's not a good time */ if (a->update && (a->update < last_cib_op_done)) { crm_info("Write out of '%s' continuing: update %d considered lost", @@ -520,21 +517,17 @@ write_attribute(attribute_t *a, bool ignore_delay) the_cib->cmds->set_user(the_cib, a->user); rc = the_cib->cmds->init_transaction(the_cib); if (rc != pcmk_ok) { - crm_err("Failed to write %s (id %s, set %s): Could not initiate " + crm_err("Failed to write %s (set %s): Could not initiate " "CIB transaction", - a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); + a->id, pcmk__s(a->set_id, "unspecified")); goto done; } } - /* Attribute will be written shortly, so clear changed flag */ - a->changed = false; - - /* We will check all peers' uuids shortly, so initialize this to false */ - a->unknown_peer_uuids = false; - - /* Attribute will be written shortly, so clear forced write flag */ - a->force_write = FALSE; + /* Attribute will be written shortly, so clear changed flag and force + * write flag, and initialize UUID missing flag to false. 
+ */ + attrd_clear_attr_flags(a, attrd_attr_changed|attrd_attr_uuid_missing|attrd_attr_force_write); /* Make the table for the attribute trap */ alert_attribute_value = pcmk__strikey_table(NULL, @@ -543,79 +536,80 @@ write_attribute(attribute_t *a, bool ignore_delay) /* Iterate over each peer value of this attribute */ g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { - crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, - CRM_GET_PEER_ANY); + const char *uuid = NULL; - /* If the value's peer info does not correspond to a peer, ignore it */ - if (peer == NULL) { - crm_notice("Cannot update %s[%s]=%s because peer not known", - a->id, v->nodename, v->current); - continue; - } + if (pcmk_is_set(v->flags, attrd_value_remote)) { + /* If this is a Pacemaker Remote node, the node's UUID is the same + * as its name, which we already have. + */ + uuid = v->nodename; - /* If we're just learning the peer's node id, remember it */ - if (peer->id && (v->nodeid == 0)) { - crm_trace("Learned ID %u for node %s", peer->id, v->nodename); - v->nodeid = peer->id; + } else { + // This will create a cluster node cache entry if none exists + crm_node_t *peer = pcmk__get_node(v->nodeid, v->nodename, NULL, + pcmk__node_search_any); + + uuid = peer->uuid; + + // Remember peer's node ID if we're just now learning it + if ((peer->id != 0) && (v->nodeid == 0)) { + crm_trace("Learned ID %u for node %s", peer->id, v->nodename); + v->nodeid = peer->id; + } } /* If this is a private attribute, no update needs to be sent */ - if (stand_alone || a->is_private) { + if (stand_alone || pcmk_is_set(a->flags, attrd_attr_is_private)) { private_updates++; continue; } - /* If the peer is found, but its uuid is unknown, defer write */ - if (peer->uuid == NULL) { - a->unknown_peer_uuids = true; - crm_notice("Cannot update %s[%s]=%s because peer UUID not known " - "(will retry if learned)", + // Defer write if this is a cluster node that's never been seen + if (uuid == NULL) { + attrd_set_attr_flags(a, attrd_attr_uuid_missing); + crm_notice("Cannot update %s[%s]='%s' now because node's UUID is " + "unknown (will retry if learned)", a->id, v->nodename, v->current); continue; } // Update this value as part of the CIB transaction we're building - rc = add_attr_update(a, v->current, peer->uuid); + rc = add_attr_update(a, v->current, uuid); if (rc != pcmk_rc_ok) { - crm_err("Failed to update %s[%s]=%s (peer known as %s, UUID %s, " - "ID %" PRIu32 "/%" PRIu32 "): %s", - a->id, v->nodename, v->current, peer->uname, peer->uuid, - peer->id, v->nodeid, pcmk_rc_str(rc)); + crm_err("Failed to update %s[%s]='%s': %s " + CRM_XS " node uuid=%s id=%" PRIu32, + a->id, v->nodename, v->current, pcmk_rc_str(rc), + uuid, v->nodeid); continue; } - crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID " - "%" PRIu32 "/%" PRIu32 ")", - a->id, v->nodename, v->current, - peer->uname, peer->uuid, peer->id, v->nodeid); + crm_debug("Writing %s[%s]=%s (node-state-id=%s node-id=%" PRIu32 ")", + a->id, v->nodename, pcmk__s(v->current, "(unset)"), + uuid, v->nodeid); cib_updates++; /* Preservation of the attribute to transmit alert */ set_alert_attribute_value(alert_attribute_value, v); - free(v->requested); - v->requested = NULL; - if (v->current) { - v->requested = strdup(v->current); - } + // Save this value so we can log it when write completes + pcmk__str_update(&(v->requested), v->current); } if (private_updates) { - crm_info("Processed %d private change%s for %s, id=%s, set=%s", + 
crm_info("Processed %d private change%s for %s (set %s)", private_updates, pcmk__plural_s(private_updates), - a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); + a->id, pcmk__s(a->set_id, "unspecified")); } if (cib_updates > 0) { - char *id = NULL; + char *id = pcmk__str_copy(a->id); // Commit transaction a->update = the_cib->cmds->end_transaction(the_cib, true, cib_none); - crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)", + crm_info("Sent CIB request %d with %d change%s for %s (set %s)", a->update, cib_updates, pcmk__plural_s(cib_updates), - a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); + a->id, pcmk__s(a->set_id, "unspecified")); - pcmk__str_update(&id, a->id); if (the_cib->cmds->register_callback_full(the_cib, a->update, CIB_OP_TIMEOUT_S, FALSE, id, "attrd_cib_callback", @@ -653,18 +647,20 @@ attrd_write_attributes(uint32_t options) pcmk_is_set(options, attrd_write_all)? "all" : "changed"); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { - if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) { + if (!pcmk_is_set(options, attrd_write_all) && + pcmk_is_set(a->flags, attrd_attr_uuid_missing)) { // Try writing this attribute again, in case peer ID was learned - a->changed = true; - } else if (a->force_write) { + attrd_set_attr_flags(a, attrd_attr_changed); + } else if (pcmk_is_set(a->flags, attrd_attr_force_write)) { /* If the force_write flag is set, write the attribute. */ - a->changed = true; + attrd_set_attr_flags(a, attrd_attr_changed); } - if (pcmk_is_set(options, attrd_write_all) || a->changed) { + if (pcmk_is_set(options, attrd_write_all) || + pcmk_is_set(a->flags, attrd_attr_changed)) { bool ignore_delay = pcmk_is_set(options, attrd_write_no_delay); - if (a->force_write) { + if (pcmk_is_set(a->flags, attrd_attr_force_write)) { // Always ignore delay when forced write flag is set ignore_delay = true; } diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c index 86dc67b..fb3b4e5 100644 --- a/daemons/attrd/attrd_corosync.c +++ b/daemons/attrd/attrd_corosync.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -19,19 +19,19 @@ #include #include #include -#include +#include #include "pacemaker-attrd.h" static xmlNode * attrd_confirmation(int callid) { - xmlNode *node = create_xml_node(NULL, __func__); + xmlNode *node = pcmk__xe_create(NULL, __func__); - crm_xml_add(node, F_TYPE, T_ATTRD); - crm_xml_add(node, F_ORIG, get_local_node_name()); - crm_xml_add(node, PCMK__XA_TASK, PCMK__ATTRD_CMD_CONFIRM); - crm_xml_add_int(node, XML_LRM_ATTR_CALLID, callid); + crm_xml_add(node, PCMK__XA_T, PCMK__VALUE_ATTRD); + crm_xml_add(node, PCMK__XA_SRC, pcmk__cluster_local_node_name()); + crm_xml_add(node, PCMK_XA_TASK, PCMK__ATTRD_CMD_CONFIRM); + crm_xml_add_int(node, PCMK__XA_CALL_ID, callid); return node; } @@ -39,7 +39,7 @@ attrd_confirmation(int callid) static void attrd_peer_message(crm_node_t *peer, xmlNode *xml) { - const char *election_op = crm_element_value(xml, F_CRM_TASK); + const char *election_op = crm_element_value(xml, PCMK__XA_CRM_TASK); if (election_op) { attrd_handle_election_op(peer, xml); @@ -64,7 +64,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, PCMK__XA_TASK); + request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK); CRM_CHECK(request.op != NULL, return); attrd_handle_request(&request); @@ -81,7 +81,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml) * response so the originating peer knows what they're a confirmation * for. */ - crm_element_value_int(xml, XML_LRM_ATTR_CALLID, &callid); + crm_element_value_int(xml, PCMK__XA_CALL_ID, &callid); reply = attrd_confirmation(callid); /* And then send the confirmation back to the originating peer. This @@ -106,22 +106,22 @@ attrd_cpg_dispatch(cpg_handle_t handle, uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; - char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); + char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { - xml = string2xml(data); + xml = pcmk__xml_parse(data); } if (xml == NULL) { crm_err("Bad message of class %d received from %s[%u]: '%.120s'", kind, from, nodeid, data); } else { - crm_node_t *peer = crm_get_peer(nodeid, from); - - attrd_peer_message(peer, xml); + attrd_peer_message(pcmk__get_node(nodeid, from, NULL, + pcmk__node_search_cluster_member), + xml); } free_xml(xml); @@ -143,86 +143,24 @@ attrd_cpg_destroy(gpointer unused) /*! * \internal - * \brief Override an attribute sync with a local value - * - * Broadcast the local node's value for an attribute that's different from the - * value provided in a peer's attribute synchronization response. This ensures a - * node's values for itself take precedence and all peers are kept in sync. + * \brief Broadcast an update for a single attribute value * - * \param[in] a Attribute entry to override - * - * \return Local instance of attribute value + * \param[in] a Attribute to broadcast + * \param[in] v Attribute value to broadcast */ -static attribute_value_t * -broadcast_local_value(const attribute_t *a) -{ - attribute_value_t *v = g_hash_table_lookup(a->values, attrd_cluster->uname); - xmlNode *sync = create_xml_node(NULL, __func__); - - crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); - attrd_add_value_xml(sync, a, v, false); - attrd_send_message(NULL, sync, false); - free_xml(sync); - return v; -} - -/*! 
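attrd_confirmation() above is small enough that its wire format is worth spelling out. With a hypothetical call ID of 42 and local node "node1", the message carries roughly the following fields (literal attribute names are assumptions based on the macros used):

    /*   t="attrd" src="node1" task="confirm" call_id="42"   */
    xmlNode *reply = attrd_confirmation(42);

    attrd_send_message(peer, reply, false);  /* back to the originating peer */
    free_xml(reply);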
- * \internal - * \brief Ensure a Pacemaker Remote node is in the correct peer cache - * - * \param[in] node_name Name of Pacemaker Remote node to check - */ -static void -cache_remote_node(const char *node_name) +void +attrd_broadcast_value(const attribute_t *a, const attribute_value_t *v) { - /* If we previously assumed this node was an unseen cluster node, - * remove its entry from the cluster peer cache. - */ - crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name, NULL); + xmlNode *op = pcmk__xe_create(NULL, PCMK_XE_OP); - if (dup && (dup->uuid == NULL)) { - reap_crm_member(0, node_name); - } - - // Ensure node is in the remote peer cache - CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); + crm_xml_add(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); + attrd_add_value_xml(op, a, v, false); + attrd_send_message(NULL, op, false); + free_xml(op); } #define state_text(state) pcmk__s((state), "in unknown state") -/*! - * \internal - * \brief Return host's hash table entry (creating one if needed) - * - * \param[in,out] values Hash table of values - * \param[in] host Name of peer to look up - * \param[in] xml XML describing the attribute - * - * \return Pointer to new or existing hash table entry - */ -static attribute_value_t * -attrd_lookup_or_create_value(GHashTable *values, const char *host, - const xmlNode *xml) -{ - attribute_value_t *v = g_hash_table_lookup(values, host); - int is_remote = 0; - - crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); - if (is_remote) { - cache_remote_node(host); - } - - if (v == NULL) { - v = calloc(1, sizeof(attribute_value_t)); - CRM_ASSERT(v != NULL); - - pcmk__str_update(&v->nodename, host); - v->is_remote = is_remote; - g_hash_table_replace(values, v->nodename, v); - } - return(v); -} - static void attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) { @@ -254,7 +192,7 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da */ if (attrd_election_won() && !pcmk_is_set(peer->flags, crm_remote_node)) { - attrd_peer_sync(peer, NULL); + attrd_peer_sync(peer); } } else { // Remove all attribute values associated with lost nodes @@ -269,17 +207,14 @@ attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *da attrd_remove_voter(peer); attrd_remove_peer_protocol_ver(peer->uname); attrd_do_not_expect_from_peer(peer->uname); - - // Ensure remote nodes that come up are in the remote node cache - } else if (!gone && is_remote) { - cache_remote_node(peer->uname); } } static void record_peer_nodeid(attribute_value_t *v, const char *host) { - crm_node_t *known_peer = crm_get_peer(v->nodeid, host); + crm_node_t *known_peer = pcmk__get_node(v->nodeid, host, NULL, + pcmk__node_search_cluster_member); crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); if (attrd_election_won()) { @@ -287,34 +222,63 @@ record_peer_nodeid(attribute_value_t *v, const char *host) } } +#define readable_value(rv_v) pcmk__s((rv_v)->current, "(unset)") + +#define readable_peer(p) \ + (((p) == NULL)? 
"all peers" : pcmk__s((p)->uname, "unknown peer")) + static void update_attr_on_host(attribute_t *a, const crm_node_t *peer, const xmlNode *xml, const char *attr, const char *value, const char *host, - bool filter, int is_force_write) + bool filter) { + int is_remote = 0; + bool changed = false; attribute_value_t *v = NULL; - v = attrd_lookup_or_create_value(a->values, host, xml); + // Create entry for value if not already existing + v = g_hash_table_lookup(a->values, host); + if (v == NULL) { + v = pcmk__assert_alloc(1, sizeof(attribute_value_t)); + + v->nodename = pcmk__str_copy(host); + g_hash_table_replace(a->values, v->nodename, v); + } + + // If value is for a Pacemaker Remote node, remember that + crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); + if (is_remote) { + attrd_set_value_flags(v, attrd_value_remote); + CRM_ASSERT(pcmk__cluster_lookup_remote_node(host) != NULL); + } - if (filter && !pcmk__str_eq(v->current, value, pcmk__str_casei) - && pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei)) { + // Check whether the value changed + changed = !pcmk__str_eq(v->current, value, pcmk__str_casei); + if (changed && filter && pcmk__str_eq(host, attrd_cluster->uname, + pcmk__str_casei)) { + /* Broadcast the local value for an attribute that differs from the + * value provided in a peer's attribute synchronization response. This + * ensures a node's values for itself take precedence and all peers are + * kept in sync. + */ + v = g_hash_table_lookup(a->values, attrd_cluster->uname); crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", - attr, host, v->current, value, peer->uname); - v = broadcast_local_value(a); + attr, host, readable_value(v), value, peer->uname); + attrd_broadcast_value(a, v); - } else if (!pcmk__str_eq(v->current, value, pcmk__str_casei)) { + } else if (changed) { crm_notice("Setting %s[%s]%s%s: %s -> %s " CRM_XS " from %s with %s write delay", attr, host, a->set_type ? " in " : "", - pcmk__s(a->set_type, ""), pcmk__s(v->current, "(unset)"), + pcmk__s(a->set_type, ""), readable_value(v), pcmk__s(value, "(unset)"), peer->uname, (a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms)); pcmk__str_update(&v->current, value); - a->changed = true; + attrd_set_attr_flags(a, attrd_attr_changed); if (pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei) - && pcmk__str_eq(attr, XML_CIB_ATTR_SHUTDOWN, pcmk__str_none)) { + && pcmk__str_eq(attr, PCMK__NODE_ATTR_SHUTDOWN, pcmk__str_none)) { if (!pcmk__str_eq(value, "0", pcmk__str_null_matches)) { attrd_set_requesting_shutdown(); @@ -326,30 +290,37 @@ update_attr_on_host(attribute_t *a, const crm_node_t *peer, const xmlNode *xml, // Write out new value or start dampening timer if (a->timeout_ms && a->timer) { - crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, attr); + crm_trace("Delaying write of %s %s for dampening", + attr, pcmk__readable_interval(a->timeout_ms)); mainloop_timer_start(a->timer); } else { attrd_write_or_elect_attribute(a); } } else { + int is_force_write = 0; + + crm_element_value_int(xml, PCMK__XA_ATTRD_IS_FORCE_WRITE, + &is_force_write); + if (is_force_write == 1 && a->timeout_ms && a->timer) { /* Save forced writing and set change flag. */ /* The actual attribute is written by Writer after election. 
*/ - crm_trace("Unchanged %s[%s] from %s is %s(Set the forced write flag)", - attr, host, peer->uname, value); - a->force_write = TRUE; + crm_trace("%s[%s] from %s is unchanged (%s), forcing write", + attr, host, peer->uname, pcmk__s(value, "unset")); + attrd_set_attr_flags(a, attrd_attr_force_write); } else { - crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value); + crm_trace("%s[%s] from %s is unchanged (%s)", + attr, host, peer->uname, pcmk__s(value, "unset")); } } - /* Set the seen flag for attribute processing held only in the own node. */ - v->seen = TRUE; + // This allows us to later detect local values that peer doesn't know about + attrd_set_value_flags(v, attrd_value_from_peer); /* If this is a cluster node whose node ID we are learning, remember it */ - if ((v->nodeid == 0) && (v->is_remote == FALSE) - && (crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, + if ((v->nodeid == 0) && !pcmk_is_set(v->flags, attrd_value_remote) + && (crm_element_value_int(xml, PCMK__XA_ATTR_HOST_ID, (int*)&v->nodeid) == 0) && (v->nodeid > 0)) { record_peer_nodeid(v, host); } @@ -361,16 +332,13 @@ attrd_peer_update_one(const crm_node_t *peer, xmlNode *xml, bool filter) attribute_t *a = NULL; const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); - int is_force_write = 0; + const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); if (attr == NULL) { crm_warn("Could not update attribute: peer did not specify name"); return; } - crm_element_value_int(xml, PCMK__XA_ATTR_FORCE, &is_force_write); - a = attrd_populate_attribute(xml, attr); if (a == NULL) { return; @@ -381,16 +349,16 @@ attrd_peer_update_one(const crm_node_t *peer, xmlNode *xml, bool filter) GHashTableIter vIter; crm_debug("Setting %s for all hosts to %s", attr, value); - xml_remove_prop(xml, PCMK__XA_ATTR_NODE_ID); + pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_HOST_ID); g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) { - update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); + update_attr_on_host(a, peer, xml, attr, value, host, filter); } } else { // Update attribute value for the given host - update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); + update_attr_on_host(a, peer, xml, attr, value, host, filter); } /* If this is a message from some attrd instance broadcasting its protocol @@ -412,13 +380,18 @@ broadcast_unseen_local_values(void) g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { + g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { - if (!(v->seen) && pcmk__str_eq(v->nodename, attrd_cluster->uname, - pcmk__str_casei)) { + + if (!pcmk_is_set(v->flags, attrd_value_from_peer) + && pcmk__str_eq(v->nodename, attrd_cluster->uname, + pcmk__str_casei)) { + crm_trace("* %s[%s]='%s' is local-only", + a->id, v->nodename, readable_value(v)); if (sync == NULL) { - sync = create_xml_node(NULL, __func__); - crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); + sync = pcmk__xe_create(NULL, __func__); + crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); } attrd_add_value_xml(sync, a, v, a->timeout_ms && a->timer); } @@ -435,17 +408,21 @@ broadcast_unseen_local_values(void) int attrd_cluster_connect(void) { + int rc = pcmk_rc_ok; + 
attrd_cluster = pcmk_cluster_new(); - attrd_cluster->destroy = attrd_cpg_destroy; - attrd_cluster->cpg.cpg_deliver_fn = attrd_cpg_dispatch; - attrd_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; + pcmk_cluster_set_destroy_fn(attrd_cluster, attrd_cpg_destroy); + pcmk_cpg_set_deliver_fn(attrd_cluster, attrd_cpg_dispatch); + pcmk_cpg_set_confchg_fn(attrd_cluster, pcmk__cpg_confchg_cb); - crm_set_status_callback(&attrd_peer_change_cb); + pcmk__cluster_set_status_callback(&attrd_peer_change_cb); - if (crm_cluster_connect(attrd_cluster) == FALSE) { + rc = pcmk_cluster_connect(attrd_cluster); + rc = pcmk_rc2legacy(rc); + if (rc != pcmk_ok) { crm_err("Cluster connection failed"); - return -ENOTCONN; + return rc; } return pcmk_ok; } @@ -455,15 +432,19 @@ attrd_peer_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE); - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); - const char *op = crm_element_value(xml, PCMK__XA_ATTR_OPERATION); - const char *interval_spec = crm_element_value(xml, PCMK__XA_ATTR_INTERVAL); - guint interval_ms = crm_parse_interval_spec(interval_spec); + const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); + const char *op = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_OPERATION); + const char *interval_spec = crm_element_value(xml, + PCMK__XA_ATTR_CLEAR_INTERVAL); + guint interval_ms = 0U; char *attr = NULL; GHashTableIter iter; regex_t regex; - crm_node_t *peer = crm_get_peer(0, request->peer); + crm_node_t *peer = pcmk__get_node(0, request->peer, NULL, + pcmk__node_search_cluster_member); + + pcmk_parse_interval_spec(interval_spec, &interval_ms); if (attrd_failure_regex(®ex, rsc, op, interval_ms) != pcmk_ok) { crm_info("Ignoring invalid request to clear failures for %s", @@ -471,10 +452,10 @@ attrd_peer_clear_failure(pcmk__request_t *request) return; } - crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); + crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Make sure value is not set, so we delete */ - xml_remove_prop(xml, PCMK__XA_ATTR_VALUE); + pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { @@ -492,7 +473,7 @@ attrd_peer_clear_failure(pcmk__request_t *request) * \internal * \brief Load attributes from a peer sync response * - * \param[in] peer Peer that sent clear request + * \param[in] peer Peer that sent sync response * \param[in] peer_won Whether peer is the attribute writer * \param[in,out] xml Request XML */ @@ -510,11 +491,11 @@ attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, xmlNode *xml) } // Process each attribute update in the sync response - for (xmlNode *child = pcmk__xml_first_child(xml); child != NULL; - child = pcmk__xml_next(child)) { + for (xmlNode *child = pcmk__xe_first_child(xml, NULL, NULL, NULL); + child != NULL; child = pcmk__xe_next(child)) { + attrd_peer_update(peer, child, - crm_element_value(child, PCMK__XA_ATTR_NODE_NAME), - true); + crm_element_value(child, PCMK__XA_ATTR_HOST), true); } if (peer_won) { @@ -540,7 +521,9 @@ attrd_peer_remove(const char *host, bool uncache, const char *source) GHashTableIter aIter; CRM_CHECK(host != NULL, return); - crm_notice("Removing all %s attributes for peer %s", host, source); + crm_notice("Removing all %s attributes for node %s " + CRM_XS " %s reaping node from cache", + host, source, (uncache? 
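One API migration above recurs in both the peer and IPC paths: crm_parse_interval_spec(), which returned the interval directly, is replaced by pcmk_parse_interval_spec(), which reports errors and writes the result through an out-parameter:

    guint interval_ms = 0U;

    /* old: interval_ms = crm_parse_interval_spec(interval_spec); */
    pcmk_parse_interval_spec(interval_spec, &interval_ms);  /* "30s" -> 30000 */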
"and" : "without")); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { @@ -550,33 +533,40 @@ attrd_peer_remove(const char *host, bool uncache, const char *source) } if (uncache) { - crm_remote_peer_cache_remove(host); - reap_crm_member(0, host); + pcmk__purge_node_from_cache(host, 0); } } +/*! + * \internal + * \brief Send all known attributes and values to a peer + * + * \param[in] peer Peer to send sync to (if NULL, broadcast to all peers) + */ void -attrd_peer_sync(crm_node_t *peer, xmlNode *xml) +attrd_peer_sync(crm_node_t *peer) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; - xmlNode *sync = create_xml_node(NULL, __func__); + xmlNode *sync = pcmk__xe_create(NULL, __func__); - crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); + crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { - crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); + crm_debug("Syncing %s[%s]='%s' to %s", + a->id, v->nodename, readable_value(v), + readable_peer(peer)); attrd_add_value_xml(sync, a, v, false); } } - crm_debug("Syncing values to %s", peer?peer->uname:"everyone"); + crm_debug("Syncing values to %s", readable_peer(peer)); attrd_send_message(peer, sync, false); free_xml(sync); } @@ -589,9 +579,10 @@ attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, CRM_CHECK((peer != NULL) && (xml != NULL), return); if (xml->children != NULL) { - for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL; - child = crm_next_same_xml(child)) { - attrd_copy_xml_attributes(xml, child); + for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, NULL); + child != NULL; child = pcmk__xe_next_same(child)) { + + pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite); attrd_peer_update_one(peer, child, filter); if (attrd_request_has_sync_point(child)) { diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c index 82fbe8a..0abd9c0 100644 --- a/daemons/attrd/attrd_elections.c +++ b/daemons/attrd/attrd_elections.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -8,9 +8,9 @@ */ #include -#include #include #include +#include #include "pacemaker-attrd.h" @@ -23,7 +23,7 @@ attrd_election_cb(gpointer user_data) attrd_declare_winner(); /* Update the peers after an election */ - attrd_peer_sync(NULL, NULL); + attrd_peer_sync(NULL); /* After winning an election, update the CIB with the values of all * attributes as the winner knows them. 
@@ -35,7 +35,7 @@ attrd_election_cb(gpointer user_data) void attrd_election_init(void) { - writer = election_init(T_ATTRD, attrd_cluster->uname, 120000, + writer = election_init(PCMK__VALUE_ATTRD, attrd_cluster->uname, 120000, attrd_election_cb); } @@ -69,7 +69,7 @@ attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml) enum election_result rc = 0; enum election_result previous = election_state(writer); - crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname); + crm_xml_add(xml, PCMK__XA_SRC, peer->uname); // Don't become writer if we're shutting down rc = election_count_vote(writer, xml, !attrd_shutting_down(false)); diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c index 05c4a69..0a2688e 100644 --- a/daemons/attrd/attrd_ipc.c +++ b/daemons/attrd/attrd_ipc.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -16,13 +16,13 @@ #include #include -#include #include #include #include #include #include #include +#include #include "pacemaker-attrd.h" @@ -32,22 +32,19 @@ static qb_ipcs_service_t *ipcs = NULL; * \internal * \brief Build the XML reply to a client query * - * param[in] attr Name of requested attribute - * param[in] host Name of requested host (or NULL for all hosts) + * \param[in] attr Name of requested attribute + * \param[in] host Name of requested host (or NULL for all hosts) * * \return New XML reply * \note Caller is responsible for freeing the resulting XML */ static xmlNode *build_query_reply(const char *attr, const char *host) { - xmlNode *reply = create_xml_node(NULL, __func__); + xmlNode *reply = pcmk__xe_create(NULL, __func__); attribute_t *a; - if (reply == NULL) { - return NULL; - } - crm_xml_add(reply, F_TYPE, T_ATTRD); - crm_xml_add(reply, F_SUBTYPE, PCMK__ATTRD_CMD_QUERY); + crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_ATTRD); + crm_xml_add(reply, PCMK__XA_SUBT, PCMK__ATTRD_CMD_QUERY); crm_xml_add(reply, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION); /* If desired attribute exists, add its value(s) to the reply */ @@ -67,11 +64,7 @@ static xmlNode *build_query_reply(const char *attr, const char *host) /* If a specific node was requested, add its value */ if (host) { v = g_hash_table_lookup(a->values, host); - host_value = create_xml_node(reply, XML_CIB_TAG_NODE); - if (host_value == NULL) { - free_xml(reply); - return NULL; - } + host_value = pcmk__xe_create(reply, PCMK_XE_NODE); pcmk__xe_add_node(host_value, host, 0); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, (v? 
v->current : NULL)); @@ -82,11 +75,7 @@ static xmlNode *build_query_reply(const char *attr, const char *host) g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { - host_value = create_xml_node(reply, XML_CIB_TAG_NODE); - if (host_value == NULL) { - free_xml(reply); - return NULL; - } + host_value = pcmk__xe_create(reply, PCMK_XE_NODE); pcmk__xe_add_node(host_value, v->nodename, 0); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, v->current); } @@ -111,11 +100,11 @@ attrd_client_clear_failure(pcmk__request_t *request) } rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE); - op = crm_element_value(xml, PCMK__XA_ATTR_OPERATION); - interval_spec = crm_element_value(xml, PCMK__XA_ATTR_INTERVAL); + op = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_OPERATION); + interval_spec = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_INTERVAL); /* Map this to an update */ - crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); + crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Add regular expression matching desired attributes */ @@ -126,22 +115,23 @@ attrd_client_clear_failure(pcmk__request_t *request) pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc); } else { - guint interval_ms = crm_parse_interval_spec(interval_spec); + guint interval_ms = 0U; + pcmk_parse_interval_spec(interval_spec, &interval_ms); pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms); } - crm_xml_add(xml, PCMK__XA_ATTR_PATTERN, pattern); + crm_xml_add(xml, PCMK__XA_ATTR_REGEX, pattern); free(pattern); } else { - crm_xml_add(xml, PCMK__XA_ATTR_PATTERN, ATTRD_RE_CLEAR_ALL); + crm_xml_add(xml, PCMK__XA_ATTR_REGEX, ATTRD_RE_CLEAR_ALL); } /* Make sure attribute and value are not set, so we delete via regex */ - xml_remove_prop(xml, PCMK__XA_ATTR_NAME); - xml_remove_prop(xml, PCMK__XA_ATTR_VALUE); + pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_NAME); + pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE); return attrd_client_update(request); } @@ -152,7 +142,7 @@ attrd_client_peer_remove(pcmk__request_t *request) xmlNode *xml = request->xml; // Host and ID are not used in combination, rather host has precedence - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); + const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); char *host_alloc = NULL; attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); @@ -160,18 +150,19 @@ attrd_client_peer_remove(pcmk__request_t *request) if (host == NULL) { int nodeid = 0; - crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, &nodeid); + crm_element_value_int(xml, PCMK__XA_ATTR_HOST_ID, &nodeid); if (nodeid > 0) { - crm_node_t *node = pcmk__search_cluster_node_cache(nodeid, NULL, - NULL); + crm_node_t *node = NULL; char *host_alloc = NULL; + node = pcmk__search_node_caches(nodeid, NULL, + pcmk__node_search_cluster_member); if (node && node->uname) { // Use cached name if available host = node->uname; } else { // Otherwise ask cluster layer - host_alloc = get_node_name(nodeid); + host_alloc = pcmk__cluster_node_name(nodeid); host = host_alloc; } pcmk__xe_add_node(xml, host, 0); @@ -211,8 +202,8 @@ attrd_client_query(pcmk__request_t *request) } /* Build the XML reply */ - reply = build_query_reply(attr, crm_element_value(query, - PCMK__XA_ATTR_NODE_NAME)); + reply = build_query_reply(attr, + crm_element_value(query, PCMK__XA_ATTR_HOST)); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Could not respond to query from %s: could not create XML reply", @@ -241,7 
+232,7 @@ attrd_client_refresh(pcmk__request_t *request) static void handle_missing_host(xmlNode *xml) { - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); + const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); if (host == NULL) { crm_trace("Inferring host"); @@ -270,16 +261,16 @@ expand_regexes(xmlNode *xml, const char *attr, const char *value, const char *re int status = regexec(&r_patt, attr, 0, NULL, 0); if (status == 0) { - xmlNode *child = create_xml_node(xml, XML_ATTR_OP); + xmlNode *child = pcmk__xe_create(xml, PCMK_XE_OP); crm_trace("Matched %s with %s", attr, regex); matched = true; - /* Copy all the attributes from the parent over, but remove the - * regex and replace it with the name. + /* Copy all the non-conflicting attributes from the parent over, + * but remove the regex and replace it with the name. */ - attrd_copy_xml_attributes(xml, child); - xml_remove_prop(child, PCMK__XA_ATTR_PATTERN); + pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite); + pcmk__xe_remove_attr(child, PCMK__XA_ATTR_REGEX); crm_xml_add(child, PCMK__XA_ATTR_NAME, attr); } } @@ -310,7 +301,7 @@ handle_regexes(pcmk__request_t *request) const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); - const char *regex = crm_element_value(xml, PCMK__XA_ATTR_PATTERN); + const char *regex = crm_element_value(xml, PCMK__XA_ATTR_REGEX); rc = expand_regexes(xml, attr, value, regex); @@ -344,7 +335,7 @@ handle_value_expansion(const char **value, xmlNode *xml, const char *op, attribute_value_t *v = NULL; if (a) { - const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); + const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); v = g_hash_table_lookup(a->values, host); } @@ -416,8 +407,10 @@ attrd_client_update(pcmk__request_t *request) * we also need to apply all the transformations in this function * to the children since they don't happen anywhere else. */ - for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL; - child = crm_next_same_xml(child)) { + for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, + NULL); + child != NULL; child = pcmk__xe_next_same(child)) { + attr = crm_element_value(child, PCMK__XA_ATTR_NAME); value = crm_element_value(child, PCMK__XA_ATTR_VALUE); @@ -443,7 +436,7 @@ attrd_client_update(pcmk__request_t *request) * up into individual messages and call attrd_client_update on * each one. */ - pcmk__xe_foreach_child(xml, XML_ATTR_OP, send_child_update, request); + pcmk__xe_foreach_child(xml, PCMK_XE_OP, send_child_update, request); request->xml = orig_xml; } @@ -452,7 +445,7 @@ attrd_client_update(pcmk__request_t *request) attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); - regex = crm_element_value(xml, PCMK__XA_ATTR_PATTERN); + regex = crm_element_value(xml, PCMK__XA_ATTR_REGEX); if (handle_regexes(request) != pcmk_rc_ok) { /* Error handling was already dealt with in handle_regexes, so just return. */ @@ -473,7 +466,8 @@ attrd_client_update(pcmk__request_t *request) return NULL; } - crm_debug("Broadcasting %s[%s]=%s%s", attr, crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME), + crm_debug("Broadcasting %s[%s]=%s%s", + attr, crm_element_value(xml, PCMK__XA_ATTR_HOST), value, (attrd_election_won()? 
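For the failure-clearing path above, the request is rewritten into a regular update whose regex selects the fail-count attributes to delete. Condensed from attrd_client_clear_failure(); the branch order is rearranged for readability:

    char *pattern = NULL;
    guint interval_ms = 0U;

    if (rsc == NULL) {
        crm_xml_add(xml, PCMK__XA_ATTR_REGEX, ATTRD_RE_CLEAR_ALL);
    } else if (op == NULL) {
        pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc);
    } else {
        pcmk_parse_interval_spec(interval_spec, &interval_ms);
        pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms);
    }
    free(pattern);
    /* name and value are then removed so the update deletes via regex */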
" (writer)" : "")); send_update_msg_to_cluster(request, xml); @@ -498,11 +492,11 @@ attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) if (attrd_shutting_down(false)) { crm_info("Ignoring new connection from pid %d during shutdown", pcmk__client_pid(c)); - return -EPERM; + return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return pcmk_ok; } @@ -572,7 +566,8 @@ attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) if (xml == NULL) { crm_debug("Unrecognizable IPC data from PID %d", pcmk__client_pid(c)); - pcmk__ipc_send_ack(client, id, flags, "ack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, + CRM_EX_PROTOCOL); return 0; } else { @@ -589,7 +584,7 @@ attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) CRM_ASSERT(client->user != NULL); pcmk__update_acl_user(xml, PCMK__XA_ATTR_USER, client->user); - request.op = crm_element_value_copy(request.xml, PCMK__XA_TASK); + request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK); CRM_CHECK(request.op != NULL, return 0); attrd_handle_request(&request); diff --git a/daemons/attrd/attrd_messages.c b/daemons/attrd/attrd_messages.c index 89da6d8..edb33a5 100644 --- a/daemons/attrd/attrd_messages.c +++ b/daemons/attrd/attrd_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2022 the Pacemaker project contributors + * Copyright 2022-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -12,7 +12,8 @@ #include #include -#include +#include // pcmk__get_node() +#include #include "pacemaker-attrd.h" @@ -30,7 +31,7 @@ static int remove_sync_point_attribute(xmlNode *xml, void *data) { pcmk__xe_remove_matching_attrs(xml, is_sync_point_attr, NULL); - pcmk__xe_foreach_child(xml, XML_ATTR_OP, remove_sync_point_attribute, NULL); + pcmk__xe_foreach_child(xml, PCMK_XE_OP, remove_sync_point_attribute, NULL); return pcmk_rc_ok; } @@ -105,7 +106,8 @@ handle_confirm_request(pcmk__request_t *request) crm_debug("Received confirmation from %s", request->peer); - if (crm_element_value_int(request->xml, XML_LRM_ATTR_CALLID, &callid) == -1) { + if (crm_element_value_int(request->xml, PCMK__XA_CALL_ID, + &callid) == -1) { pcmk__set_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Could not get callid from XML"); } else { @@ -147,8 +149,14 @@ static xmlNode * handle_remove_request(pcmk__request_t *request) { if (request->peer != NULL) { - const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_NODE_NAME); - attrd_peer_remove(host, true, request->peer); + const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_HOST); + bool reap = false; + + if (pcmk__xe_get_bool_attr(request->xml, PCMK__XA_REAP, + &reap) != pcmk_rc_ok) { + reap = true; // Default to true for backward compatibility + } + attrd_peer_remove(host, reap, request->peer); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { @@ -166,20 +174,6 @@ handle_refresh_request(pcmk__request_t *request) } } -static xmlNode * -handle_sync_request(pcmk__request_t *request) -{ - if (request->peer != NULL) { - crm_node_t *peer = crm_get_peer(0, request->peer); - - attrd_peer_sync(peer, request->xml); - pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); - return NULL; - } else { - return handle_unknown_request(request); - } -} - static xmlNode * handle_sync_response_request(pcmk__request_t *request) { @@ -187,7 +181,8 @@ 
handle_sync_response_request(pcmk__request_t *request) return handle_unknown_request(request); } else { if (request->peer != NULL) { - crm_node_t *peer = crm_get_peer(0, request->peer); + crm_node_t *peer = pcmk__get_node(0, request->peer, NULL, + pcmk__node_search_cluster_member); bool peer_won = attrd_check_for_new_writer(peer, request->xml); if (!pcmk__str_eq(peer->uname, attrd_cluster->uname, pcmk__str_casei)) { @@ -204,8 +199,9 @@ static xmlNode * handle_update_request(pcmk__request_t *request) { if (request->peer != NULL) { - const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_NODE_NAME); - crm_node_t *peer = crm_get_peer(0, request->peer); + const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_HOST); + crm_node_t *peer = pcmk__get_node(0, request->peer, NULL, + pcmk__node_search_cluster_member); attrd_peer_update(peer, request->xml, host, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); @@ -251,7 +247,6 @@ attrd_register_handlers(void) { PCMK__ATTRD_CMD_PEER_REMOVE, handle_remove_request }, { PCMK__ATTRD_CMD_QUERY, handle_query_request }, { PCMK__ATTRD_CMD_REFRESH, handle_refresh_request }, - { PCMK__ATTRD_CMD_SYNC, handle_sync_request }, { PCMK__ATTRD_CMD_SYNC_RESPONSE, handle_sync_response_request }, { PCMK__ATTRD_CMD_UPDATE, handle_update_request }, { PCMK__ATTRD_CMD_UPDATE_DELAY, handle_update_request }, @@ -323,11 +318,11 @@ attrd_handle_request(pcmk__request_t *request) void attrd_broadcast_protocol(void) { - xmlNode *attrd_op = create_xml_node(NULL, __func__); + xmlNode *attrd_op = pcmk__xe_create(NULL, __func__); - crm_xml_add(attrd_op, F_TYPE, T_ATTRD); - crm_xml_add(attrd_op, F_ORIG, crm_system_name); - crm_xml_add(attrd_op, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); + crm_xml_add(attrd_op, PCMK__XA_T, PCMK__VALUE_ATTRD); + crm_xml_add(attrd_op, PCMK__XA_SRC, crm_system_name); + crm_xml_add(attrd_op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); crm_xml_add(attrd_op, PCMK__XA_ATTR_NAME, CRM_ATTR_PROTOCOL); crm_xml_add(attrd_op, PCMK__XA_ATTR_VALUE, ATTRD_PROTOCOL_VERSION); crm_xml_add_int(attrd_op, PCMK__XA_ATTR_IS_PRIVATE, 1); @@ -344,9 +339,9 @@ attrd_broadcast_protocol(void) gboolean attrd_send_message(crm_node_t *node, xmlNode *data, bool confirm) { - const char *op = crm_element_value(data, PCMK__XA_TASK); + const char *op = crm_element_value(data, PCMK_XA_TASK); - crm_xml_add(data, F_TYPE, T_ATTRD); + crm_xml_add(data, PCMK__XA_T, PCMK__VALUE_ATTRD); crm_xml_add(data, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION); /* Request a confirmation from the destination peer node (which could @@ -358,5 +353,5 @@ attrd_send_message(crm_node_t *node, xmlNode *data, bool confirm) } attrd_xml_add_writer(data); - return send_cluster_message(node, crm_msg_attrd, data, TRUE); + return pcmk__cluster_send_message(node, crm_msg_attrd, data); } diff --git a/daemons/attrd/attrd_sync.c b/daemons/attrd/attrd_sync.c index 1a6c24c..de99db2 100644 --- a/daemons/attrd/attrd_sync.c +++ b/daemons/attrd/attrd_sync.c @@ -1,5 +1,5 @@ /* - * Copyright 2022-2023 the Pacemaker project contributors + * Copyright 2022-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
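The attrd_messages.c hunks above pair two API migrations that travel together: crm_get_peer() becomes pcmk__get_node() with an explicit search scope, and send_cluster_message() becomes pcmk__cluster_send_message(), which drops the old ordering flag. A minimal sketch of the combined calling pattern, using only names visible in these hunks (the wrapper function itself is hypothetical):

static gboolean
send_to_attrd_peer(const char *peer_name, xmlNode *msg)
{
    // Look up (or cache) the peer; the search scope is now explicit
    crm_node_t *node = pcmk__get_node(0, peer_name, NULL,
                                      pcmk__node_search_cluster_member);

    crm_xml_add(msg, PCMK__XA_T, PCMK__VALUE_ATTRD);
    return pcmk__cluster_send_message(node, crm_msg_attrd, msg);
}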
* @@ -9,8 +9,8 @@ #include -#include -#include +#include +#include #include "pacemaker-attrd.h" @@ -113,7 +113,7 @@ sync_point_str(enum attrd_sync_point sync_point) } else if (sync_point == attrd_sync_point_cluster) { return PCMK__VALUE_CLUSTER; } else { - return "unknown"; + return PCMK_VALUE_UNKNOWN; } } @@ -145,13 +145,7 @@ attrd_add_client_to_waitlist(pcmk__request_t *request) waitlist = pcmk__intkey_table(free_waitlist_node); } - wl = calloc(sizeof(struct waitlist_node), 1); - - CRM_ASSERT(wl != NULL); - - wl->client_id = strdup(request->ipc_client->id); - - CRM_ASSERT(wl->client_id); + wl = pcmk__assert_alloc(1, sizeof(struct waitlist_node)); if (pcmk__str_eq(sync_point, PCMK__VALUE_LOCAL, pcmk__str_none)) { wl->sync_point = attrd_sync_point_local; @@ -162,6 +156,7 @@ attrd_add_client_to_waitlist(pcmk__request_t *request) return; } + wl->client_id = pcmk__str_copy(request->ipc_client->id); wl->ipc_id = request->ipc_id; wl->flags = request->flags; @@ -175,7 +170,7 @@ attrd_add_client_to_waitlist(pcmk__request_t *request) /* And then add the key to the request XML so we can uniquely identify * it when it comes time to issue the ACK. */ - crm_xml_add_int(request->xml, XML_LRM_ATTR_CALLID, waitlist_client); + crm_xml_add_int(request->xml, PCMK__XA_CALL_ID, waitlist_client); } /*! @@ -245,7 +240,7 @@ attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml) return; } - if (crm_element_value_int(xml, XML_LRM_ATTR_CALLID, &callid) == -1) { + if (crm_element_value_int(xml, PCMK__XA_CALL_ID, &callid) == -1) { crm_warn("Could not get callid from request XML"); return; } @@ -316,7 +311,8 @@ attrd_request_sync_point(xmlNode *xml) CRM_CHECK(xml != NULL, return NULL); if (xml->children != NULL) { - xmlNode *child = pcmk__xe_match(xml, XML_ATTR_OP, PCMK__XA_ATTR_SYNC_POINT, NULL); + xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, + PCMK__XA_ATTR_SYNC_POINT, NULL); if (child) { return crm_element_value(child, PCMK__XA_ATTR_SYNC_POINT); @@ -381,8 +377,10 @@ confirmation_timeout_cb(gpointer data) } crm_trace("Timed out waiting for confirmations for client %s", client->id); - pcmk__ipc_send_ack(client, action->ipc_id, action->flags | crm_ipc_client_response, - "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_TIMEOUT); + pcmk__ipc_send_ack(client, action->ipc_id, + action->flags|crm_ipc_client_response, + PCMK__XE_ACK, ATTRD_PROTOCOL_VERSION, + CRM_EX_TIMEOUT); g_hash_table_iter_remove(&iter); crm_trace("%d requests now in expected confirmations table", g_hash_table_size(expected_confirmations)); @@ -486,7 +484,7 @@ attrd_expect_confirmations(pcmk__request_t *request, attrd_confirmation_action_f expected_confirmations = pcmk__intkey_table((GDestroyNotify) free_action); } - if (crm_element_value_int(request->xml, XML_LRM_ATTR_CALLID, &callid) == -1) { + if (crm_element_value_int(request->xml, PCMK__XA_CALL_ID, &callid) == -1) { crm_err("Could not get callid from xml"); return; } @@ -499,23 +497,17 @@ attrd_expect_confirmations(pcmk__request_t *request, attrd_confirmation_action_f g_hash_table_iter_init(&iter, peer_protocol_vers); while (g_hash_table_iter_next(&iter, &host, &ver)) { if (ATTRD_SUPPORTS_CONFIRMATION(GPOINTER_TO_INT(ver))) { - char *s = strdup((char *) host); - - CRM_ASSERT(s != NULL); - respondents = g_list_prepend(respondents, s); + respondents = g_list_prepend(respondents, + pcmk__str_copy((char *) host)); } } - action = calloc(1, sizeof(struct confirmation_action)); - CRM_ASSERT(action != NULL); + action = pcmk__assert_alloc(1, sizeof(struct confirmation_action)); 
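Both allocation sites in this hunk follow the same conversion seen throughout the patch: calloc() plus CRM_ASSERT() collapses into pcmk__assert_alloc(), and strdup() plus CRM_ASSERT() into pcmk__str_copy(); both abort on allocation failure instead of returning NULL, so call sites lose their error paths. A before/after sketch with an illustrative struct:

struct example {
    char *client_id;
};

static struct example *
example_new(const char *client_id)
{
    // Never NULL: the allocator aborts the daemon if memory is exhausted
    struct example *e = pcmk__assert_alloc(1, sizeof(struct example));

    e->client_id = pcmk__str_copy(client_id); // same guarantee for copies
    return e;
}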
action->respondents = respondents; action->fn = fn; - action->xml = copy_xml(request->xml); - - action->client_id = strdup(request->ipc_client->id); - CRM_ASSERT(action->client_id != NULL); - + action->xml = pcmk__xml_copy(NULL, request->xml); + action->client_id = pcmk__str_copy(request->ipc_client->id); action->ipc_id = request->ipc_id; action->flags = request->flags; diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c index 341ee1a..2d0bc76 100644 --- a/daemons/attrd/attrd_utils.c +++ b/daemons/attrd/attrd_utils.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "pacemaker-attrd.h" @@ -58,11 +58,12 @@ attrd_clear_requesting_shutdown(void) * \internal * \brief Check whether local attribute manager is shutting down * - * \param[in] if_requested Also consider presence of "shutdown" attribute + * \param[in] if_requested If \c true, also consider presence of + * \c PCMK__NODE_ATTR_SHUTDOWN attribute * * \return \c true if local attribute manager has begun shutdown sequence * or (if \p if_requested is \c true) whether local node has a nonzero - * "shutdown" attribute set, otherwise \c false + * \c PCMK__NODE_ATTR_SHUTDOWN attribute set, otherwise \c false * \note Most callers should pass \c false for \p if_requested, because the * attribute manager needs to continue performing while the controller is * shutting down, and even needs to be eligible for election in case all @@ -175,8 +176,8 @@ attrd_expand_value(const char *value, const char *old_value) } int_value += offset; - if (int_value > INFINITY) { - int_value = INFINITY; + if (int_value > PCMK_SCORE_INFINITY) { + int_value = PCMK_SCORE_INFINITY; } return int_value; } @@ -204,7 +205,7 @@ attrd_failure_regex(regex_t *regex, const char *rsc, const char *op, /* Create a pattern that matches desired attributes */ if (rsc == NULL) { - pattern = strdup(ATTRD_RE_CLEAR_ALL); + pattern = pcmk__str_copy(ATTRD_RE_CLEAR_ALL); } else if (op == NULL) { pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc); } else { @@ -238,7 +239,6 @@ attrd_free_attribute(gpointer data) free(a->id); free(a->set_id); free(a->set_type); - free(a->uuid); free(a->user); mainloop_timer_del(a->timer); @@ -288,11 +288,9 @@ attrd_update_minimum_protocol_ver(const char *host, const char *value) pcmk__scan_min_int(value, &ver, 0); if (ver > 0) { - char *host_name = strdup(host); - /* Record the peer attrd's protocol version. */ - CRM_ASSERT(host_name != NULL); - g_hash_table_insert(peer_protocol_vers, host_name, GINT_TO_POINTER(ver)); + g_hash_table_insert(peer_protocol_vers, pcmk__str_copy(host), + GINT_TO_POINTER(ver)); /* If the protocol version is a new minimum, record it as such. */ if (minimum_protocol_version == -1 || ver < minimum_protocol_version) { @@ -302,24 +300,3 @@ attrd_update_minimum_protocol_ver(const char *host, const char *value) } } } - -void -attrd_copy_xml_attributes(xmlNode *src, xmlNode *dest) -{ - /* Copy attributes from the wrapper parent node into the child node. - * We can't just use copy_in_properties because we want to skip any - * attributes that are already set on the child. For instance, if - * we were told to use a specific node, there will already be a node - * attribute on the child. Copying the parent's node attribute over - * could result in the wrong value. 
- */ - for (xmlAttrPtr a = pcmk__xe_first_attr(src); a != NULL; a = a->next) { - const char *p_name = (const char *) a->name; - const char *p_value = ((a == NULL) || (a->children == NULL)) ? NULL : - (const char *) a->children->content; - - if (crm_element_value(dest, p_name) == NULL) { - crm_xml_add(dest, p_name, p_value); - } - } -} diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index 8091c5b..4ae5c8a 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -20,8 +20,6 @@ #include #include -#include -#include #include #include #include @@ -31,7 +29,7 @@ #include #include -#include +#include #include "pacemaker-attrd.h" #define SUMMARY "daemon for managing Pacemaker node attributes" @@ -59,7 +57,7 @@ static pcmk__supported_format_t formats[] = { }; lrmd_t *the_lrmd = NULL; -crm_cluster_t *attrd_cluster = NULL; +pcmk_cluster_t *attrd_cluster = NULL; crm_trigger_t *attrd_config_read = NULL; crm_exit_t attrd_exit_status = CRM_EX_OK; @@ -136,7 +134,7 @@ main(int argc, char **argv) // Open additional log files pcmk__add_logfiles(log_files, out); - crm_log_init(T_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); + crm_log_init(PCMK__VALUE_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_notice("Starting Pacemaker node attribute manager%s", stand_alone ? " in standalone mode" : ""); diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h index b8929a7..76faf04 100644 --- a/daemons/attrd/pacemaker-attrd.h +++ b/daemons/attrd/pacemaker-attrd.h @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
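The attrd_copy_xml_attributes() helper removed above survives as a one-liner: pcmk__xe_copy_attrs() with pcmk__xaf_no_overwrite gives attributes already present on the destination precedence, which is exactly what the hand-rolled loop checked for. Its replacement as used in the expand_regexes() hunk earlier, condensed (function and variable names illustrative):

static void
expand_one_match(xmlNode *child, xmlNode *parent, const char *attr)
{
    // Child's existing attributes win; the parent fills in only the gaps
    pcmk__xe_copy_attrs(child, parent, pcmk__xaf_no_overwrite);

    // The regex itself must not propagate to the expanded entry
    pcmk__xe_remove_attr(child, PCMK__XA_ATTR_REGEX);
    crm_xml_add(child, PCMK__XA_ATTR_NAME, attr);
}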
* @@ -16,7 +16,7 @@ #include #include #include -#include +#include /* * Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used @@ -31,9 +31,8 @@ * -------- --------- ------------------- * 1 1.1.11 PCMK__ATTRD_CMD_UPDATE (PCMK__XA_ATTR_NAME only), * PCMK__ATTRD_CMD_PEER_REMOVE, PCMK__ATTRD_CMD_REFRESH, - * PCMK__ATTRD_CMD_FLUSH, PCMK__ATTRD_CMD_SYNC, - * PCMK__ATTRD_CMD_SYNC_RESPONSE - * 1 1.1.13 PCMK__ATTRD_CMD_UPDATE (with PCMK__XA_ATTR_PATTERN), + * PCMK__ATTRD_CMD_FLUSH, PCMK__ATTRD_CMD_SYNC_RESPONSE + * 1 1.1.13 PCMK__ATTRD_CMD_UPDATE (with PCMK__XA_ATTR_REGEX), * PCMK__ATTRD_CMD_QUERY * 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH, * PCMK__ATTRD_CMD_UPDATE_DELAY @@ -42,14 +41,16 @@ * 4 2.1.5 Multiple attributes can be updated in a single IPC * message * 5 2.1.5 Peers can request confirmation of a sent message + * 6 2.1.7 PCMK__ATTRD_CMD_PEER_REMOVE supports PCMK__XA_REAP */ -#define ATTRD_PROTOCOL_VERSION "5" +#define ATTRD_PROTOCOL_VERSION "6" #define ATTRD_SUPPORTS_MULTI_MESSAGE(x) ((x) >= 4) #define ATTRD_SUPPORTS_CONFIRMATION(x) ((x) >= 5) -#define attrd_send_ack(client, id, flags) \ - pcmk__ipc_send_ack((client), (id), (flags), "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_INDETERMINATE) +#define attrd_send_ack(client, id, flags) \ + pcmk__ipc_send_ack((client), (id), (flags), PCMK__XE_ACK, \ + ATTRD_PROTOCOL_VERSION, CRM_EX_INDETERMINATE) void attrd_init_mainloop(void); void attrd_run_mainloop(void); @@ -65,6 +66,7 @@ void attrd_ipc_fini(void); int attrd_cib_connect(int max_retry); void attrd_cib_disconnect(void); void attrd_cib_init(void); +void attrd_cib_erase_transient_attrs(const char *node); bool attrd_value_needs_expansion(const char *value); int attrd_expand_value(const char *value, const char *old_value); @@ -116,47 +118,75 @@ void attrd_declare_winner(void); void attrd_remove_voter(const crm_node_t *peer); void attrd_xml_add_writer(xmlNode *xml); -typedef struct attribute_s { - char *uuid; /* TODO: Remove if at all possible */ - char *id; - char *set_id; - char *set_type; - GHashTable *values; - int update; - int timeout_ms; - - /* TODO: refactor these three as a bitmask */ - bool changed; /* whether attribute value has changed since last write */ - bool unknown_peer_uuids; /* whether we know we're missing a peer uuid */ - gboolean is_private; /* whether to keep this attribute out of the CIB */ - - mainloop_timer_t *timer; - - char *user; - - gboolean force_write; /* Flag for updating attribute by ignoring delay */ +enum attrd_attr_flags { + attrd_attr_none = 0U, + attrd_attr_changed = (1U << 0), // Attribute value has changed since last write + attrd_attr_uuid_missing = (1U << 1), // Whether we know we're missing a peer UUID + attrd_attr_is_private = (1U << 2), // Whether to keep this attribute out of the CIB + attrd_attr_force_write = (1U << 3), // Update attribute by ignoring delay +}; +typedef struct attribute_s { + char *id; // Attribute name + char *set_type; // PCMK_XE_INSTANCE_ATTRIBUTES or PCMK_XE_UTILIZATION + char *set_id; // Set's XML ID to use when writing + char *user; // ACL user to use for CIB writes + int update; // Call ID of pending write + int timeout_ms; // How long to wait for more changes before writing + uint32_t flags; // Group of enum attrd_attr_flags + GHashTable *values; // Key: node name, value: attribute_value_t + mainloop_timer_t *timer; // Timer to use for timeout_ms } attribute_t; +#define attrd_set_attr_flags(attr, flags_to_set) do { \ + (attr)->flags = pcmk__set_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Value for attribute", 
(attr)->id, \ + (attr)->flags, (flags_to_set), #flags_to_set); \ + } while (0) + +#define attrd_clear_attr_flags(attr, flags_to_clear) do { \ + (attr)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Value for attribute", (attr)->id, \ + (attr)->flags, (flags_to_clear), #flags_to_clear); \ + } while (0) + +enum attrd_value_flags { + attrd_value_none = 0U, + attrd_value_remote = (1U << 0), // Value is for Pacemaker Remote node + attrd_value_from_peer = (1U << 1), // Value is from peer sync response +}; + typedef struct attribute_value_s { - uint32_t nodeid; - gboolean is_remote; - char *nodename; - char *current; - char *requested; - gboolean seen; + char *nodename; // Node that this value is for + char *current; // Attribute value + char *requested; // Value specified in pending CIB write, if any + uint32_t nodeid; // Cluster node ID of node that this value is for + uint32_t flags; // Group of attrd_value_flags } attribute_value_t; -extern crm_cluster_t *attrd_cluster; +#define attrd_set_value_flags(attr_value, flags_to_set) do { \ + (attr_value)->flags = pcmk__set_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Value for node", (attr_value)->nodename, \ + (attr_value)->flags, (flags_to_set), #flags_to_set); \ + } while (0) + +#define attrd_clear_value_flags(attr_value, flags_to_clear) do { \ + (attr_value)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ + LOG_TRACE, "Value for node", (attr_value)->nodename, \ + (attr_value)->flags, (flags_to_clear), #flags_to_clear); \ + } while (0) + +extern pcmk_cluster_t *attrd_cluster; extern GHashTable *attributes; extern GHashTable *peer_protocol_vers; #define CIB_OP_TIMEOUT_S 120 int attrd_cluster_connect(void); +void attrd_broadcast_value(const attribute_t *a, const attribute_value_t *v); void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, bool filter); -void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); +void attrd_peer_sync(crm_node_t *peer); void attrd_peer_remove(const char *host, bool uncache, const char *source); void attrd_peer_clear_failure(pcmk__request_t *request); void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, @@ -176,6 +206,8 @@ void attrd_clear_value_seen(void); void attrd_free_attribute(gpointer data); void attrd_free_attribute_value(gpointer data); attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr); +char *attrd_set_id(const attribute_t *attr, const char *node_state_id); +char *attrd_nvpair_id(const attribute_t *attr, const char *node_state_id); enum attrd_write_options { attrd_write_changed = 0, @@ -214,8 +246,6 @@ void attrd_remove_client_from_waitlist(pcmk__client_t *client); const char *attrd_request_sync_point(xmlNode *xml); bool attrd_request_has_sync_point(xmlNode *xml); -void attrd_copy_xml_attributes(xmlNode *src, xmlNode *dest); - extern gboolean stand_alone; #endif /* PACEMAKER_ATTRD__H */ diff --git a/daemons/based/Makefile.am b/daemons/based/Makefile.am index 022fc47..c10b461 100644 --- a/daemons/based/Makefile.am +++ b/daemons/based/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2004-2023 the Pacemaker project contributors +# Copyright 2004-2024 the Pacemaker project contributors # # The version control history for this file may have further details. 
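The pacemaker-attrd.h refactor above folds three independent booleans into a single uint32_t flags word, manipulated through pcmk__set_flags_as()/pcmk__clear_flags_as() so every transition is trace-logged with the attribute's name. A minimal usage sketch, assuming an attribute_t *a as declared in the new struct:

// Mark the attribute dirty so the writer will flush it to the CIB
attrd_set_attr_flags(a, attrd_attr_changed);

if (pcmk_is_set(a->flags, attrd_attr_is_private)) {
    // Private attributes never reach the CIB, so undo the pending write
    attrd_clear_attr_flags(a, attrd_attr_changed);
}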
# @@ -8,6 +8,7 @@ # include $(top_srcdir)/mk/common.mk +include $(top_srcdir)/mk/man.mk EXTRA_DIST = cib.pam @@ -35,6 +36,12 @@ pacemaker_based_SOURCES = pacemaker-based.c \ based_remote.c \ based_transaction.c +if BUILD_XML_HELP +man7_MANS = pacemaker-based.7 +endif + +CLEANFILES = $(man7_MANS) + .PHONY: install-exec-hook install-exec-hook: if BUILD_LEGACY_LINKS diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c index 4fac222..b1a8209 100644 --- a/daemons/based/based_callbacks.c +++ b/daemons/based/based_callbacks.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -72,11 +71,11 @@ cib_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) if (cib_shutdown_flag) { crm_info("Ignoring new IPC client [%d] during shutdown", pcmk__client_pid(c)); - return -EPERM; + return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -159,20 +158,20 @@ static xmlNode * create_cib_reply(const char *op, const char *call_id, const char *client_id, int call_options, int rc, xmlNode *call_data) { - xmlNode *reply = create_xml_node(NULL, "cib-reply"); + xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_CIB_REPLY); - CRM_ASSERT(reply != NULL); - - crm_xml_add(reply, F_TYPE, T_CIB); - crm_xml_add(reply, F_CIB_OPERATION, op); - crm_xml_add(reply, F_CIB_CALLID, call_id); - crm_xml_add(reply, F_CIB_CLIENTID, client_id); - crm_xml_add_int(reply, F_CIB_CALLOPTS, call_options); - crm_xml_add_int(reply, F_CIB_RC, rc); + crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(reply, PCMK__XA_CIB_OP, op); + crm_xml_add(reply, PCMK__XA_CIB_CALLID, call_id); + crm_xml_add(reply, PCMK__XA_CIB_CLIENTID, client_id); + crm_xml_add_int(reply, PCMK__XA_CIB_CALLOPT, call_options); + crm_xml_add_int(reply, PCMK__XA_CIB_RC, rc); if (call_data != NULL) { + xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_CIB_CALLDATA); + crm_trace("Attaching reply output"); - add_message_xml(reply, F_CIB_CALLDATA, call_data); + pcmk__xml_copy(wrapper, call_data); } crm_log_xml_explicit(reply, "cib:reply"); @@ -189,7 +188,7 @@ do_local_notify(const xmlNode *notify_src, const char *client_id, CRM_ASSERT(notify_src && client_id); - crm_element_value_int(notify_src, F_CIB_CALLID, &call_id); + crm_element_value_int(notify_src, PCMK__XA_CIB_CALLID, &call_id); client_obj = pcmk__find_client_by_id(client_id); if (client_obj == NULL) { @@ -252,10 +251,10 @@ void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, pcmk__client_t *cib_client, gboolean privileged) { - const char *op = crm_element_value(op_request, F_CIB_OPERATION); + const char *op = crm_element_value(op_request, PCMK__XA_CIB_OP); int call_options = cib_none; - crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options); + crm_element_value_int(op_request, PCMK__XA_CIB_CALLOPT, &call_options); /* Requests with cib_transaction set should not be sent to based directly * (outside of a commit-transaction request) @@ -266,38 +265,43 @@ cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { if (flags & crm_ipc_client_response) { - xmlNode *ack = create_xml_node(NULL, __func__); + xmlNode *ack = pcmk__xe_create(NULL, __func__); - crm_xml_add(ack, F_CIB_OPERATION, 
CRM_OP_REGISTER); - crm_xml_add(ack, F_CIB_CLIENTID, cib_client->id); + crm_xml_add(ack, PCMK__XA_CIB_OP, CRM_OP_REGISTER); + crm_xml_add(ack, PCMK__XA_CIB_CLIENTID, cib_client->id); pcmk__ipc_send_xml(cib_client, id, ack, flags); cib_client->request_id = 0; free_xml(ack); } return; - } else if (pcmk__str_eq(op, T_CIB_NOTIFY, pcmk__str_none)) { + } else if (pcmk__str_eq(op, PCMK__VALUE_CIB_NOTIFY, pcmk__str_none)) { /* Update the notify filters for this client */ int on_off = 0; crm_exit_t status = CRM_EX_OK; uint64_t bit = UINT64_C(0); - const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE); + const char *type = crm_element_value(op_request, + PCMK__XA_CIB_NOTIFY_TYPE); - crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off); + crm_element_value_int(op_request, PCMK__XA_CIB_NOTIFY_ACTIVATE, + &on_off); crm_debug("Setting %s callbacks %s for client %s", type, (on_off? "on" : "off"), pcmk__client_name(cib_client)); - if (pcmk__str_eq(type, T_CIB_POST_NOTIFY, pcmk__str_casei)) { + if (pcmk__str_eq(type, PCMK__VALUE_CIB_POST_NOTIFY, pcmk__str_none)) { bit = cib_notify_post; - } else if (pcmk__str_eq(type, T_CIB_PRE_NOTIFY, pcmk__str_casei)) { + } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_PRE_NOTIFY, + pcmk__str_none)) { bit = cib_notify_pre; - } else if (pcmk__str_eq(type, T_CIB_UPDATE_CONFIRM, pcmk__str_casei)) { + } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_UPDATE_CONFIRMATION, + pcmk__str_none)) { bit = cib_notify_confirm; - } else if (pcmk__str_eq(type, T_CIB_DIFF_NOTIFY, pcmk__str_casei)) { + } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_DIFF_NOTIFY, + pcmk__str_none)) { bit = cib_notify_diff; } else { @@ -312,7 +316,7 @@ cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, } } - pcmk__ipc_send_ack(cib_client, id, flags, "ack", NULL, status); + pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_ACK, NULL, status); return; } @@ -329,12 +333,13 @@ cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean xmlNode *op_request = pcmk__client_data2xml(cib_client, data, &id, &flags); if (op_request) { - crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options); + crm_element_value_int(op_request, PCMK__XA_CIB_CALLOPT, &call_options); } if (op_request == NULL) { crm_trace("Invalid message from %p", c); - pcmk__ipc_send_ack(cib_client, id, flags, "nack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_NACK, NULL, + CRM_EX_PROTOCOL); return 0; } else if(cib_client == NULL) { @@ -349,12 +354,13 @@ cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean } if (cib_client->name == NULL) { - const char *value = crm_element_value(op_request, F_CIB_CLIENTNAME); + const char *value = crm_element_value(op_request, + PCMK__XA_CIB_CLIENTNAME); if (value == NULL) { cib_client->name = pcmk__itoa(cib_client->pid); } else { - cib_client->name = strdup(value); + cib_client->name = pcmk__str_copy(value); if (crm_is_daemon_name(value)) { pcmk__set_client_flags(cib_client, cib_is_daemon); } @@ -363,7 +369,7 @@ cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean /* Allow cluster daemons more leeway before being evicted */ if (pcmk_is_set(cib_client->flags, cib_is_daemon)) { - const char *qmax = cib_config_lookup("cluster-ipc-limit"); + const char *qmax = cib_config_lookup(PCMK_OPT_CLUSTER_IPC_LIMIT); if (pcmk__set_client_queue_max(cib_client, qmax)) { crm_trace("IPC threshold for client %s[%u] is now %u", @@ -372,11 +378,11 @@ 
cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean } } - crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id); - crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name); + crm_xml_add(op_request, PCMK__XA_CIB_CLIENTID, cib_client->id); + crm_xml_add(op_request, PCMK__XA_CIB_CLIENTNAME, cib_client->name); CRM_LOG_ASSERT(cib_client->user != NULL); - pcmk__update_acl_user(op_request, F_CIB_USER, cib_client->user); + pcmk__update_acl_user(op_request, PCMK__XA_CIB_USER, cib_client->user); cib_common_callback_worker(id, flags, op_request, cib_client, privileged); free_xml(op_request); @@ -393,7 +399,7 @@ cib_digester_cb(gpointer data) { if (based_is_primary) { char buffer[32]; - xmlNode *ping = create_xml_node(NULL, "ping"); + xmlNode *ping = pcmk__xe_create(NULL, PCMK__XE_PING); ping_seq++; free(ping_digest); @@ -402,12 +408,12 @@ cib_digester_cb(gpointer data) snprintf(buffer, 32, "%" PRIu64, ping_seq); crm_trace("Requesting peer digests (%s)", buffer); - crm_xml_add(ping, F_TYPE, "cib"); - crm_xml_add(ping, F_CIB_OPERATION, CRM_OP_PING); - crm_xml_add(ping, F_CIB_PING_ID, buffer); + crm_xml_add(ping, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(ping, PCMK__XA_CIB_OP, CRM_OP_PING); + crm_xml_add(ping, PCMK__XA_CIB_PING_ID, buffer); - crm_xml_add(ping, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - send_cluster_message(NULL, crm_msg_cib, ping, TRUE); + crm_xml_add(ping, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); + pcmk__cluster_send_message(NULL, crm_msg_cib, ping); free_xml(ping); } @@ -418,14 +424,17 @@ static void process_ping_reply(xmlNode *reply) { uint64_t seq = 0; - const char *host = crm_element_value(reply, F_ORIG); + const char *host = crm_element_value(reply, PCMK__XA_SRC); - xmlNode *pong = get_message_xml(reply, F_CIB_CALLDATA); - const char *seq_s = crm_element_value(pong, F_CIB_PING_ID); - const char *digest = crm_element_value(pong, XML_ATTR_DIGEST); + xmlNode *wrapper = pcmk__xe_first_child(reply, PCMK__XE_CIB_CALLDATA, NULL, + NULL); + xmlNode *pong = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + + const char *seq_s = crm_element_value(pong, PCMK__XA_CIB_PING_ID); + const char *digest = crm_element_value(pong, PCMK__XA_DIGEST); if (seq_s == NULL) { - crm_debug("Ignoring ping reply with no " F_CIB_PING_ID); + crm_debug("Ignoring ping reply with no " PCMK__XA_CIB_PING_ID); return; } else { @@ -447,7 +456,7 @@ process_ping_reply(xmlNode *reply) crm_trace("Ignoring ping reply %s from %s: cib updated since", seq_s, host); } else { - const char *version = crm_element_value(pong, XML_ATTR_CRM_VERSION); + const char *version = crm_element_value(pong, PCMK_XA_CRM_FEATURE_SET); if(ping_digest == NULL) { crm_trace("Calculating new digest"); @@ -456,16 +465,30 @@ process_ping_reply(xmlNode *reply) crm_trace("Processing ping reply %s from %s (%s)", seq_s, host, digest); if (!pcmk__str_eq(ping_digest, digest, pcmk__str_casei)) { - xmlNode *remote_cib = get_message_xml(pong, F_CIB_CALLDATA); + xmlNode *wrapper = pcmk__xe_first_child(pong, PCMK__XE_CIB_CALLDATA, + NULL, NULL); + xmlNode *remote_cib = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + + const char *admin_epoch_s = NULL; + const char *epoch_s = NULL; + const char *num_updates_s = NULL; + + if (remote_cib != NULL) { + admin_epoch_s = crm_element_value(remote_cib, + PCMK_XA_ADMIN_EPOCH); + epoch_s = crm_element_value(remote_cib, PCMK_XA_EPOCH); + num_updates_s = crm_element_value(remote_cib, + PCMK_XA_NUM_UPDATES); + } crm_notice("Local CIB %s.%s.%s.%s differs from %s: %s.%s.%s.%s %p", - 
crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN), - crm_element_value(the_cib, XML_ATTR_GENERATION), - crm_element_value(the_cib, XML_ATTR_NUMUPDATES), + crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH), + crm_element_value(the_cib, PCMK_XA_EPOCH), + crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), ping_digest, host, - remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION_ADMIN):"_", - remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION):"_", - remote_cib?crm_element_value(remote_cib, XML_ATTR_NUMUPDATES):"_", + pcmk__s(admin_epoch_s, "_"), + pcmk__s(epoch_s, "_"), + pcmk__s(num_updates_s, "_"), digest, remote_cib); if(remote_cib && remote_cib->children) { @@ -513,10 +536,11 @@ static void queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { - cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); + cib_local_notify_t *notify = pcmk__assert_alloc(1, + sizeof(cib_local_notify_t)); notify->notify_src = notify_src; - notify->client_id = strdup(client_id); + notify->client_id = pcmk__str_copy(client_id); notify->sync_reply = sync_reply; notify->from_peer = from_peer; @@ -658,12 +682,12 @@ parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request, const char *op = NULL; const char *host = NULL; const char *delegated = NULL; - const char *originator = crm_element_value(request, F_ORIG); - const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); + const char *originator = crm_element_value(request, PCMK__XA_SRC); + const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei); - if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { + if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { *needs_reply = FALSE; if (is_reply) { *local_notify = TRUE; @@ -675,7 +699,7 @@ parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request, return TRUE; } - op = crm_element_value(request, F_CIB_OPERATION); + op = crm_element_value(request, PCMK__XA_CIB_OP); crm_trace("Processing legacy %s request sent by %s", op, originator); if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) { @@ -703,7 +727,7 @@ parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request, return TRUE; } - host = crm_element_value(request, F_CIB_HOST); + host = crm_element_value(request, PCMK__XA_CIB_HOST); if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) { crm_trace("Processing %s request sent to us from %s", op, originator); return TRUE; @@ -719,7 +743,7 @@ parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request, return TRUE; } - delegated = crm_element_value(request, F_CIB_DELEGATED); + delegated = crm_element_value(request, PCMK__XA_CIB_DELEGATED_FROM); if (delegated != NULL) { crm_trace("Ignoring message for primary instance"); @@ -755,10 +779,11 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, gboolean *process) { const char *host = NULL; - const char *delegated = crm_element_value(request, F_CIB_DELEGATED); - const char *op = crm_element_value(request, F_CIB_OPERATION); - const char *originator = crm_element_value(request, F_ORIG); - const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); + const char *delegated = crm_element_value(request, + PCMK__XA_CIB_DELEGATED_FROM); + const char *op = crm_element_value(request, PCMK__XA_CIB_OP); + const char *originator = crm_element_value(request, PCMK__XA_SRC); + const char *reply_to = 
crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei); @@ -767,7 +792,7 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, } if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { - /* sync_our_cib() sets F_CIB_ISREPLY */ + // sync_our_cib() sets PCMK__XA_CIB_ISREPLYTO if (reply_to) { delegated = reply_to; } @@ -783,17 +808,18 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_UPGRADE, pcmk__str_none)) { /* Only the DC (node with the oldest software) should process - * this operation if F_CIB_SCHEMA_MAX is unset + * this operation if PCMK__XA_CIB_SCHEMA_MAX is unset. * * If the DC is happy it will then send out another * PCMK__CIB_REQUEST_UPGRADE which will tell all nodes to do the actual * upgrade. * - * Except this time F_CIB_SCHEMA_MAX will be set which puts a + * Except this time PCMK__XA_CIB_SCHEMA_MAX will be set which puts a * limit on how far newer nodes will go */ - const char *max = crm_element_value(request, F_CIB_SCHEMA_MAX); - const char *upgrade_rc = crm_element_value(request, F_CIB_UPGRADE_RC); + const char *max = crm_element_value(request, PCMK__XA_CIB_SCHEMA_MAX); + const char *upgrade_rc = crm_element_value(request, + PCMK__XA_CIB_UPGRADE_RC); crm_trace("Parsing upgrade %s for %s with max=%s and upgrade_rc=%s", (is_reply? "reply" : "request"), @@ -802,7 +828,7 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, if (upgrade_rc != NULL) { // Our upgrade request was rejected by DC, notify clients of result - crm_xml_add(request, F_CIB_RC, upgrade_rc); + crm_xml_add(request, PCMK__XA_CIB_RC, upgrade_rc); } else if ((max == NULL) && based_is_primary) { /* We are the DC, check if this upgrade is allowed */ @@ -817,7 +843,7 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, return FALSE; } - } else if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { + } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { crm_info("Detected legacy %s global update from %s", op, originator); send_sync_request(NULL); legacy_mode = TRUE; @@ -854,7 +880,7 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, *local_notify = pcmk__str_eq(delegated, OUR_NODENAME, pcmk__str_casei); - host = crm_element_value(request, F_CIB_HOST); + host = crm_element_value(request, PCMK__XA_CIB_HOST); if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) { crm_trace("Processing %s request sent to us from %s", op, originator); *needs_reply = TRUE; @@ -871,8 +897,10 @@ parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request, crm_trace("Processing %s request broadcast by %s call %s on %s " "(local clients will%s be notified)", op, - pcmk__s(crm_element_value(request, F_CIB_CLIENTNAME), "client"), - pcmk__s(crm_element_value(request, F_CIB_CALLID), "without ID"), + pcmk__s(crm_element_value(request, PCMK__XA_CIB_CLIENTNAME), + "client"), + pcmk__s(crm_element_value(request, PCMK__XA_CIB_CALLID), + "without ID"), originator, (*local_notify? 
"" : "not")); return TRUE; } @@ -904,12 +932,14 @@ parse_peer_options(const cib__operation_t *operation, xmlNode *request, static void forward_request(xmlNode *request) { - const char *op = crm_element_value(request, F_CIB_OPERATION); - const char *section = crm_element_value(request, F_CIB_SECTION); - const char *host = crm_element_value(request, F_CIB_HOST); - const char *originator = crm_element_value(request, F_ORIG); - const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); - const char *call_id = crm_element_value(request, F_CIB_CALLID); + const char *op = crm_element_value(request, PCMK__XA_CIB_OP); + const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); + const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); + const char *originator = crm_element_value(request, PCMK__XA_SRC); + const char *client_name = crm_element_value(request, + PCMK__XA_CIB_CLIENTNAME); + const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); + crm_node_t *peer = NULL; int log_level = LOG_INFO; @@ -926,13 +956,15 @@ forward_request(xmlNode *request) pcmk__s(client_name, "unspecified"), pcmk__s(call_id, "unspecified")); - crm_xml_add(request, F_CIB_DELEGATED, OUR_NODENAME); + crm_xml_add(request, PCMK__XA_CIB_DELEGATED_FROM, OUR_NODENAME); - send_cluster_message(((host != NULL)? crm_get_peer(0, host) : NULL), - crm_msg_cib, request, FALSE); + if (host != NULL) { + peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); + } + pcmk__cluster_send_message(peer, crm_msg_cib, request); // Return the request to its original state - xml_remove_prop(request, F_CIB_DELEGATED); + pcmk__xe_remove_attr(request, PCMK__XA_CIB_DELEGATED_FROM); } static gboolean @@ -957,8 +989,10 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb const char *digest = NULL; int format = 1; + xmlNode *wrapper = NULL; + CRM_LOG_ASSERT(result_diff != NULL); - digest = crm_element_value(result_diff, XML_ATTR_DIGEST); + digest = crm_element_value(result_diff, PCMK__XA_DIGEST); crm_element_value_int(result_diff, PCMK_XA_FORMAT, &format); cib_diff_version_details(result_diff, @@ -969,24 +1003,30 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb diff_del_admin_epoch, diff_del_epoch, diff_del_updates, diff_add_admin_epoch, diff_add_epoch, diff_add_updates, digest); - crm_xml_add(msg, F_CIB_ISREPLY, originator); - pcmk__xe_set_bool_attr(msg, F_CIB_GLOBAL_UPDATE, true); - crm_xml_add(msg, F_CIB_OPERATION, PCMK__CIB_REQUEST_APPLY_PATCH); - crm_xml_add(msg, F_CIB_USER, CRM_DAEMON_USER); + crm_xml_add(msg, PCMK__XA_CIB_ISREPLYTO, originator); + pcmk__xe_set_bool_attr(msg, PCMK__XA_CIB_UPDATE, true); + crm_xml_add(msg, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_APPLY_PATCH); + crm_xml_add(msg, PCMK__XA_CIB_USER, CRM_DAEMON_USER); if (format == 1) { CRM_ASSERT(digest != NULL); } - add_message_xml(msg, F_CIB_UPDATE_DIFF, result_diff); + wrapper = pcmk__xe_create(msg, PCMK__XE_CIB_UPDATE_DIFF); + pcmk__xml_copy(wrapper, result_diff); + crm_log_xml_explicit(msg, "copy"); - return send_cluster_message(NULL, crm_msg_cib, msg, TRUE); + return pcmk__cluster_send_message(NULL, crm_msg_cib, msg); } else if (originator != NULL) { /* send reply via HA to originating node */ + const crm_node_t *node = + pcmk__get_node(0, originator, NULL, + pcmk__node_search_cluster_member); + crm_trace("Sending request result to %s only", originator); - crm_xml_add(msg, F_CIB_ISREPLY, originator); - return send_cluster_message(crm_get_peer(0, originator), 
crm_msg_cib, msg, FALSE); + crm_xml_add(msg, PCMK__XA_CIB_ISREPLYTO, originator); + return pcmk__cluster_send_message(node, crm_msg_cib, msg); } return FALSE; @@ -1020,19 +1060,20 @@ cib_process_request(xmlNode *request, gboolean privileged, xmlNode *result_diff = NULL; int rc = pcmk_ok; - const char *op = crm_element_value(request, F_CIB_OPERATION); - const char *originator = crm_element_value(request, F_ORIG); - const char *host = crm_element_value(request, F_CIB_HOST); + const char *op = crm_element_value(request, PCMK__XA_CIB_OP); + const char *originator = crm_element_value(request, PCMK__XA_SRC); + const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const char *target = NULL; - const char *call_id = crm_element_value(request, F_CIB_CALLID); - const char *client_id = crm_element_value(request, F_CIB_CLIENTID); - const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); - const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); + const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); + const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); + const char *client_name = crm_element_value(request, + PCMK__XA_CIB_CLIENTNAME); + const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); const cib__operation_t *operation = NULL; cib__op_fn_t op_function = NULL; - crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); + crm_element_value_int(request, PCMK__XA_CIB_CALLOPT, &call_options); if ((host != NULL) && (*host == '\0')) { host = NULL; @@ -1053,7 +1094,7 @@ cib_process_request(xmlNode *request, gboolean privileged, crm_trace("Processing peer %s operation from %s/%s on %s intended for %s (reply=%s)", op, client_name, call_id, originator, target, reply_to); } else { - crm_xml_add(request, F_ORIG, OUR_NODENAME); + crm_xml_add(request, PCMK__XA_SRC, OUR_NODENAME); crm_trace("Processing local %s operation from %s/%s intended for %s", op, client_name, call_id, target); } @@ -1124,7 +1165,10 @@ cib_process_request(xmlNode *request, gboolean privileged, time_t finished = 0; time_t now = time(NULL); int level = LOG_INFO; - const char *section = crm_element_value(request, F_CIB_SECTION); + const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); + const char *admin_epoch_s = NULL; + const char *epoch_s = NULL; + const char *num_updates_s = NULL; rc = cib_process_command(request, operation, op_function, &op_reply, &result_diff, privileged); @@ -1132,7 +1176,7 @@ cib_process_request(xmlNode *request, gboolean privileged, if (!is_update) { level = LOG_TRACE; - } else if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { + } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { switch (rc) { case pcmk_ok: level = LOG_INFO; @@ -1150,13 +1194,19 @@ cib_process_request(xmlNode *request, gboolean privileged, level = LOG_WARNING; } + if (the_cib != NULL) { + admin_epoch_s = crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH); + epoch_s = crm_element_value(the_cib, PCMK_XA_EPOCH); + num_updates_s = crm_element_value(the_cib, PCMK_XA_NUM_UPDATES); + } + do_crm_log(level, "Completed %s operation for section %s: %s (rc=%d, origin=%s/%s/%s, version=%s.%s.%s)", op, section ? section : "'all'", pcmk_strerror(rc), rc, originator ? originator : "local", client_name, call_id, - the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN) : "0", - the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION) : "0", - the_cib ? 
crm_element_value(the_cib, XML_ATTR_NUMUPDATES) : "0"); + pcmk__s(admin_epoch_s, "0"), + pcmk__s(epoch_s, "0"), + pcmk__s(num_updates_s, "0")); finished = time(NULL); if ((finished - now) > 3) { @@ -1186,7 +1236,8 @@ cib_process_request(xmlNode *request, gboolean privileged, gboolean broadcast = FALSE; cib_local_bcast_num++; - crm_xml_add_int(request, F_CIB_LOCAL_NOTIFY_ID, cib_local_bcast_num); + crm_xml_add_int(request, PCMK__XA_CIB_LOCAL_NOTIFY_ID, + cib_local_bcast_num); broadcast = send_peer_reply(request, result_diff, originator, TRUE); if (broadcast && client_id && local_notify && op_reply) { @@ -1261,27 +1312,35 @@ static xmlNode * prepare_input(const xmlNode *request, enum cib__op_type type, const char **section) { + xmlNode *wrapper = NULL; xmlNode *input = NULL; *section = NULL; switch (type) { case cib__op_apply_patch: - if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { - input = get_message_xml(request, F_CIB_UPDATE_DIFF); - } else { - input = get_message_xml(request, F_CIB_CALLDATA); + { + const char *wrapper_name = PCMK__XE_CIB_CALLDATA; + + if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { + wrapper_name = PCMK__XE_CIB_UPDATE_DIFF; + } + wrapper = pcmk__xe_first_child(request, wrapper_name, NULL, + NULL); + input = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); } break; default: - input = get_message_xml(request, F_CIB_CALLDATA); - *section = crm_element_value(request, F_CIB_SECTION); + wrapper = pcmk__xe_first_child(request, PCMK__XE_CIB_CALLDATA, NULL, + NULL); + input = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + *section = crm_element_value(request, PCMK__XA_CIB_SECTION); break; } // Grab the specified section - if ((*section != NULL) && pcmk__xe_is(input, XML_TAG_CIB)) { + if ((*section != NULL) && pcmk__xe_is(input, PCMK_XE_CIB)) { input = pcmk_find_cib_element(input, *section); } @@ -1289,10 +1348,10 @@ prepare_input(const xmlNode *request, enum cib__op_type type, } // v1 and v2 patch formats -#define XPATH_CONFIG_CHANGE \ - "//" XML_CIB_TAG_CRMCONFIG " | " \ - "//" XML_DIFF_CHANGE \ - "[contains(@" XML_DIFF_PATH ",'/" XML_CIB_TAG_CRMCONFIG "/')]" +#define XPATH_CONFIG_CHANGE \ + "//" PCMK_XE_CRM_CONFIG " | " \ + "//" PCMK_XE_CHANGE \ + "[contains(@" PCMK_XA_PATH ",'/" PCMK_XE_CRM_CONFIG "/')]" static bool contains_config_change(xmlNode *diff) @@ -1323,10 +1382,11 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, const char *op = NULL; const char *section = NULL; - const char *call_id = crm_element_value(request, F_CIB_CALLID); - const char *client_id = crm_element_value(request, F_CIB_CLIENTID); - const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); - const char *originator = crm_element_value(request, F_ORIG); + const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); + const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); + const char *client_name = crm_element_value(request, + PCMK__XA_CIB_CLIENTNAME); + const char *originator = crm_element_value(request, PCMK__XA_SRC); int rc = pcmk_ok; @@ -1345,8 +1405,8 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, *cib_diff = NULL; /* Start processing the request... 
*/ - op = crm_element_value(request, F_CIB_OPERATION); - crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); + op = crm_element_value(request, PCMK__XA_CIB_OP); + crm_element_value_int(request, PCMK__XA_CIB_CALLOPT, &call_options); if (!privileged && pcmk_is_set(operation->flags, cib__op_attr_privileged)) { rc = -EACCES; @@ -1357,7 +1417,7 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, input = prepare_input(request, operation->type, §ion); if (!pcmk_is_set(operation->flags, cib__op_attr_modifies)) { - rc = cib_perform_op(op, call_options, op_function, true, section, + rc = cib_perform_op(NULL, op, call_options, op_function, true, section, request, input, false, &config_changed, &the_cib, &result_cib, NULL, &output); @@ -1368,11 +1428,11 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, /* @COMPAT: Handle a valid write action (legacy) * * @TODO: Re-evaluate whether this is all truly legacy. The cib_force_diff - * portion is. However, F_CIB_GLOBAL_UPDATE may be set by a sync operation + * portion is. However, PCMK__XA_CIB_UPDATE may be set by a sync operation * even in non-legacy mode, and manage_counters tells xml_create_patchset() * whether to update version/epoch info. */ - if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) { + if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { manage_counters = false; cib__set_call_options(call_options, "call", cib_force_diff); crm_trace("Global update detected"); @@ -1390,7 +1450,7 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, } // result_cib must not be modified after cib_perform_op() returns - rc = cib_perform_op(op, call_options, op_function, false, section, + rc = cib_perform_op(NULL, op, call_options, op_function, false, section, request, input, manage_counters, &config_changed, &the_cib, &result_cib, cib_diff, &output); @@ -1426,8 +1486,8 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, } crm_trace("Activating %s->%s%s", - crm_element_value(the_cib, XML_ATTR_NUMUPDATES), - crm_element_value(result_cib, XML_ATTR_NUMUPDATES), + crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), + crm_element_value(result_cib, PCMK_XA_NUM_UPDATES), (config_changed? 
" changed" : "")); rc = activateCibXml(result_cib, config_changed, op); @@ -1451,7 +1511,8 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, */ if ((operation->type == cib__op_commit_transact) && pcmk__str_eq(originator, OUR_NODENAME, pcmk__str_casei) - && compare_version(crm_element_value(the_cib, XML_ATTR_CRM_VERSION), + && compare_version(crm_element_value(the_cib, + PCMK_XA_CRM_FEATURE_SET), "3.19.0") < 0) { sync_our_cib(request, TRUE); @@ -1473,7 +1534,7 @@ cib_process_command(xmlNode *request, const cib__operation_t *operation, } else { crm_trace("Not activating %d %d %s", rc, pcmk_is_set(call_options, cib_dryrun), - crm_element_value(result_cib, XML_ATTR_NUMUPDATES)); + crm_element_value(result_cib, PCMK_XA_NUM_UPDATES)); if (result_cib != the_cib) { free_xml(result_cib); @@ -1507,7 +1568,7 @@ void cib_peer_callback(xmlNode * msg, void *private_data) { const char *reason = NULL; - const char *originator = crm_element_value(msg, F_ORIG); + const char *originator = crm_element_value(msg, PCMK__XA_SRC); if (cib_legacy_mode() && pcmk__str_eq(originator, OUR_NODENAME, @@ -1515,7 +1576,8 @@ cib_peer_callback(xmlNode * msg, void *private_data) /* message is from ourselves */ int bcast_id = 0; - if (!(crm_element_value_int(msg, F_CIB_LOCAL_NOTIFY_ID, &bcast_id))) { + if (crm_element_value_int(msg, PCMK__XA_CIB_LOCAL_NOTIFY_ID, + &bcast_id) == 0) { check_local_notify(bcast_id); } return; @@ -1525,8 +1587,8 @@ cib_peer_callback(xmlNode * msg, void *private_data) goto bail; } - if (crm_element_value(msg, F_CIB_CLIENTNAME) == NULL) { - crm_xml_add(msg, F_CIB_CLIENTNAME, originator); + if (crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME) == NULL) { + crm_xml_add(msg, PCMK__XA_CIB_CLIENTNAME, originator); } /* crm_log_xml_trace(msg, "Peer[inbound]"); */ @@ -1535,10 +1597,9 @@ cib_peer_callback(xmlNode * msg, void *private_data) bail: if (reason) { - const char *seq = crm_element_value(msg, F_SEQ); - const char *op = crm_element_value(msg, F_CIB_OPERATION); + const char *op = crm_element_value(msg, PCMK__XA_CIB_OP); - crm_warn("Discarding %s message (%s) from %s: %s", op, seq, originator, reason); + crm_warn("Discarding %s message from %s: %s", op, originator, reason); } } @@ -1565,7 +1626,7 @@ initiate_exit(void) int active = 0; xmlNode *leaving = NULL; - active = crm_active_peers(); + active = pcmk__cluster_num_active_nodes(); if (active < 2) { // This is the last active node terminate_cib(__func__, 0); return; @@ -1573,11 +1634,11 @@ initiate_exit(void) crm_info("Sending shutdown request to %d peers", active); - leaving = create_xml_node(NULL, "exit-notification"); - crm_xml_add(leaving, F_TYPE, "cib"); - crm_xml_add(leaving, F_CIB_OPERATION, PCMK__CIB_REQUEST_SHUTDOWN); + leaving = pcmk__xe_create(NULL, PCMK__XE_EXIT_NOTIFICATION); + crm_xml_add(leaving, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(leaving, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_SHUTDOWN); - send_cluster_message(NULL, crm_msg_cib, leaving, TRUE); + pcmk__cluster_send_message(NULL, crm_msg_cib, leaving); free_xml(leaving); g_timeout_add(EXIT_ESCALATION_MS, cib_force_exit, NULL); @@ -1688,14 +1749,14 @@ terminate_cib(const char *caller, int fast) * peer caches). */ if (fast == 0) { - crm_cluster_disconnect(crm_cluster); + pcmk_cluster_disconnect(crm_cluster); } g_main_loop_quit(mainloop); } else { /* Quit via clean exit. Even the peer status callback can disconnect * here, because we're not returning control to the caller. 
*/ - crm_cluster_disconnect(crm_cluster); + pcmk_cluster_disconnect(crm_cluster); pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(CRM_EX_OK); } diff --git a/daemons/based/based_io.c b/daemons/based/based_io.c index f252ac1..7410b03 100644 --- a/daemons/based/based_io.c +++ b/daemons/based/based_io.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -29,7 +29,6 @@ #include #include -#include #include #include #include @@ -258,20 +257,20 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status) crm_err("*** Disabling disk writes to avoid confusing Valgrind ***"); } - status = find_xml_node(root, XML_CIB_TAG_STATUS, FALSE); + status = pcmk__xe_first_child(root, PCMK_XE_STATUS, NULL, NULL); if (discard_status && status != NULL) { - /* strip out the status section if there is one */ + // Strip out the PCMK_XE_STATUS section if there is one free_xml(status); status = NULL; } if (status == NULL) { - create_xml_node(root, XML_CIB_TAG_STATUS); + pcmk__xe_create(root, PCMK_XE_STATUS); } /* Do this before schema validation happens */ /* fill in some defaults */ - name = XML_ATTR_GENERATION_ADMIN; + name = PCMK_XA_ADMIN_EPOCH; value = crm_element_value(root, name); if (value == NULL) { crm_warn("No value for %s was specified in the configuration.", name); @@ -283,38 +282,38 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status) crm_xml_add_int(root, name, 0); } - name = XML_ATTR_GENERATION; + name = PCMK_XA_EPOCH; value = crm_element_value(root, name); if (value == NULL) { crm_xml_add_int(root, name, 0); } - name = XML_ATTR_NUMUPDATES; + name = PCMK_XA_NUM_UPDATES; value = crm_element_value(root, name); if (value == NULL) { crm_xml_add_int(root, name, 0); } // Unset (DC should set appropriate value) - xml_remove_prop(root, XML_ATTR_DC_UUID); + pcmk__xe_remove_attr(root, PCMK_XA_DC_UUID); if (discard_status) { crm_log_xml_trace(root, "[on-disk]"); } - validation = crm_element_value(root, XML_ATTR_VALIDATION); - if (validate_xml(root, NULL, TRUE) == FALSE) { + validation = crm_element_value(root, PCMK_XA_VALIDATE_WITH); + if (!pcmk__configured_schema_validates(root)) { crm_err("CIB does not validate with %s", pcmk__s(validation, "no schema specified")); cib_status = -pcmk_err_schema_validation; + // @COMPAT Not specifying validate-with is deprecated since 2.1.8 } else if (validation == NULL) { - int version = 0; - - update_validation(&root, &version, 0, FALSE, FALSE); - if (version > 0) { + pcmk__update_schema(&root, NULL, false, false); + validation = crm_element_value(root, PCMK_XA_VALIDATE_WITH); + if (validation != NULL) { crm_notice("Enabling %s validation on" - " the existing (sane) configuration", get_schema_name(version)); + " the existing (sane) configuration", validation); } else { crm_err("CIB does not validate with any known schema"); cib_status = -pcmk_err_schema_validation; @@ -408,7 +407,7 @@ write_cib_contents(gpointer p) /* Make a copy of the CIB to write (possibly in a forked child) */ if (p) { /* Synchronous write out */ - cib_local = copy_xml(p); + cib_local = pcmk__xml_copy(NULL, p); } else { int pid = 0; @@ -445,7 +444,7 @@ write_cib_contents(gpointer p) /* In theory, we can scribble on the_cib here and not affect the parent, * but let's be safe anyway. 
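readCibXmlFile() above also illustrates the schema-API migration: validate_xml()/update_validation(), which trafficked in integer schema versions, give way to pcmk__configured_schema_validates()/pcmk__update_schema(), which work with schema names read back from PCMK_XA_VALIDATE_WITH. The new flow condensed, assuming root holds the on-disk CIB as in the hunk:

if (!pcmk__configured_schema_validates(root)) {
    cib_status = -pcmk_err_schema_validation;

} else if (crm_element_value(root, PCMK_XA_VALIDATE_WITH) == NULL) {
    // No schema recorded (deprecated): adopt the newest one that validates
    pcmk__update_schema(&root, NULL, false, false);
}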
*/ - cib_local = copy_xml(the_cib); + cib_local = pcmk__xml_copy(NULL, the_cib); } /* Write the CIB */ diff --git a/daemons/based/based_messages.c b/daemons/based/based_messages.c index 35d639a..87b7eb1 100644 --- a/daemons/based/based_messages.c +++ b/daemons/based/based_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -45,11 +44,11 @@ cib_process_shutdown_req(const char *op, int options, const char *section, xmlNo xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { - const char *host = crm_element_value(req, F_ORIG); + const char *host = crm_element_value(req, PCMK__XA_SRC); *answer = NULL; - if (crm_element_value(req, F_CIB_ISREPLY) == NULL) { + if (crm_element_value(req, PCMK__XA_CIB_ISREPLYTO) == NULL) { crm_info("Peer %s is requesting to shut down", host); return pcmk_ok; } @@ -117,17 +116,21 @@ static int sync_in_progress = 0; void send_sync_request(const char *host) { - xmlNode *sync_me = create_xml_node(NULL, "sync-me"); + xmlNode *sync_me = pcmk__xe_create(NULL, "sync-me"); + crm_node_t *peer = NULL; crm_info("Requesting re-sync from %s", (host? host : "all peers")); sync_in_progress = 1; - crm_xml_add(sync_me, F_TYPE, "cib"); - crm_xml_add(sync_me, F_CIB_OPERATION, PCMK__CIB_REQUEST_SYNC_TO_ONE); - crm_xml_add(sync_me, F_CIB_DELEGATED, + crm_xml_add(sync_me, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(sync_me, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_SYNC_TO_ONE); + crm_xml_add(sync_me, PCMK__XA_CIB_DELEGATED_FROM, stand_alone? "localhost" : crm_cluster->uname); - send_cluster_message(host ? 
crm_get_peer(0, host) : NULL, crm_msg_cib, sync_me, FALSE); + if (host != NULL) { + peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); + } + pcmk__cluster_send_message(peer, crm_msg_cib, sync_me); free_xml(sync_me); } @@ -135,38 +138,44 @@ int cib_process_ping(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { - const char *host = crm_element_value(req, F_ORIG); - const char *seq = crm_element_value(req, F_CIB_PING_ID); + const char *host = crm_element_value(req, PCMK__XA_SRC); + const char *seq = crm_element_value(req, PCMK__XA_CIB_PING_ID); char *digest = calculate_xml_versioned_digest(the_cib, FALSE, TRUE, CRM_FEATURE_SET); + xmlNode *wrapper = NULL; + crm_trace("Processing \"%s\" event %s from %s", op, seq, host); - *answer = create_xml_node(NULL, XML_CRM_TAG_PING); - - crm_xml_add(*answer, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - crm_xml_add(*answer, XML_ATTR_DIGEST, digest); - crm_xml_add(*answer, F_CIB_PING_ID, seq); - - pcmk__if_tracing( - { - // Append additional detail so the receiver can log the differences - add_message_xml(*answer, F_CIB_CALLDATA, the_cib); - }, - if (the_cib != NULL) { - // Always include at least the version details - xmlNode *shallow = create_xml_node(NULL, - (const char *) the_cib->name); - - copy_in_properties(shallow, the_cib); - add_message_xml(*answer, F_CIB_CALLDATA, shallow); - free_xml(shallow); - } - ); + *answer = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); + + crm_xml_add(*answer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); + crm_xml_add(*answer, PCMK__XA_DIGEST, digest); + crm_xml_add(*answer, PCMK__XA_CIB_PING_ID, seq); + + wrapper = pcmk__xe_create(*answer, PCMK__XE_CIB_CALLDATA); + + if (the_cib != NULL) { + pcmk__if_tracing( + { + /* Append additional detail so the receiver can log the + * differences + */ + pcmk__xml_copy(wrapper, the_cib); + }, + { + // Always include at least the version details + const char *name = (const char *) the_cib->name; + xmlNode *shallow = pcmk__xe_create(wrapper, name); + + pcmk__xe_copy_attrs(shallow, the_cib, pcmk__xaf_none); + } + ); + } crm_info("Reporting our current digest to %s: %s for %s.%s.%s", host, digest, - crm_element_value(existing_cib, XML_ATTR_GENERATION_ADMIN), - crm_element_value(existing_cib, XML_ATTR_GENERATION), - crm_element_value(existing_cib, XML_ATTR_NUMUPDATES)); + crm_element_value(existing_cib, PCMK_XA_ADMIN_EPOCH), + crm_element_value(existing_cib, PCMK_XA_EPOCH), + crm_element_value(existing_cib, PCMK_XA_NUM_UPDATES)); free(digest); @@ -188,51 +197,51 @@ cib_process_upgrade_server(const char *op, int options, const char *section, xml *answer = NULL; - if(crm_element_value(req, F_CIB_SCHEMA_MAX)) { + if (crm_element_value(req, PCMK__XA_CIB_SCHEMA_MAX) != NULL) { /* The originator of an upgrade request sends it to the DC, without - * F_CIB_SCHEMA_MAX. If an upgrade is needed, the DC re-broadcasts the - * request with F_CIB_SCHEMA_MAX, and each node performs the upgrade - * (and notifies its local clients) here. + * PCMK__XA_CIB_SCHEMA_MAX. If an upgrade is needed, the DC + * re-broadcasts the request with PCMK__XA_CIB_SCHEMA_MAX, and each node + * performs the upgrade (and notifies its local clients) here. 
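
The cib_process_ping() rewrite above embeds the full CIB in the reply only when tracing is enabled; otherwise it attaches a shallow copy that carries just the root element's attributes, which is where the version fields live. A hedged sketch of that shallow copy using plain libxml2 (the daemon itself goes through pcmk__xe_create() and pcmk__xe_copy_attrs(); the element names here are illustrative):

    #include <assert.h>
    #include <libxml/tree.h>

    /* Attach a copy of cib's root element and attributes, but not its children */
    static xmlNode *
    shallow_version_copy(xmlNode *wrapper, const xmlNode *cib)
    {
        // extended=2 asks libxml2 for properties and namespaces only
        xmlNode *shallow = xmlCopyNode((xmlNode *) cib, 2);

        if (shallow != NULL) {
            xmlAddChild(wrapper, shallow);
        }
        return shallow;
    }

    int
    main(void)
    {
        xmlNode *cib = xmlNewNode(NULL, BAD_CAST "cib");
        xmlNode *wrapper = xmlNewNode(NULL, BAD_CAST "cib_calldata");

        xmlSetProp(cib, BAD_CAST "epoch", BAD_CAST "42");
        assert(shallow_version_copy(wrapper, cib) != NULL);
        xmlFreeNode(wrapper);  // frees the attached shallow copy too
        xmlFreeNode(cib);
        return 0;
    }
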
*/ return cib_process_upgrade( op, options, section, req, input, existing_cib, result_cib, answer); } else { - int new_version = 0; - int current_version = 0; - xmlNode *scratch = copy_xml(existing_cib); - const char *host = crm_element_value(req, F_ORIG); - const char *value = crm_element_value(existing_cib, XML_ATTR_VALIDATION); - const char *client_id = crm_element_value(req, F_CIB_CLIENTID); - const char *call_opts = crm_element_value(req, F_CIB_CALLOPTS); - const char *call_id = crm_element_value(req, F_CIB_CALLID); + xmlNode *scratch = pcmk__xml_copy(NULL, existing_cib); + const char *host = crm_element_value(req, PCMK__XA_SRC); + const char *original_schema = NULL; + const char *new_schema = NULL; + const char *client_id = crm_element_value(req, PCMK__XA_CIB_CLIENTID); + const char *call_opts = crm_element_value(req, PCMK__XA_CIB_CALLOPT); + const char *call_id = crm_element_value(req, PCMK__XA_CIB_CALLID); crm_trace("Processing \"%s\" event", op); - if (value != NULL) { - current_version = get_schema_version(value); - } + original_schema = crm_element_value(existing_cib, + PCMK_XA_VALIDATE_WITH); + rc = pcmk__update_schema(&scratch, NULL, true, true); + rc = pcmk_rc2legacy(rc); + new_schema = crm_element_value(scratch, PCMK_XA_VALIDATE_WITH); - rc = update_validation(&scratch, &new_version, 0, TRUE, TRUE); - if (new_version > current_version) { - xmlNode *up = create_xml_node(NULL, __func__); + if (pcmk__cmp_schemas_by_name(new_schema, original_schema) > 0) { + xmlNode *up = pcmk__xe_create(NULL, __func__); rc = pcmk_ok; crm_notice("Upgrade request from %s verified", host); - crm_xml_add(up, F_TYPE, "cib"); - crm_xml_add(up, F_CIB_OPERATION, PCMK__CIB_REQUEST_UPGRADE); - crm_xml_add(up, F_CIB_SCHEMA_MAX, get_schema_name(new_version)); - crm_xml_add(up, F_CIB_DELEGATED, host); - crm_xml_add(up, F_CIB_CLIENTID, client_id); - crm_xml_add(up, F_CIB_CALLOPTS, call_opts); - crm_xml_add(up, F_CIB_CALLID, call_id); + crm_xml_add(up, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(up, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_UPGRADE); + crm_xml_add(up, PCMK__XA_CIB_SCHEMA_MAX, new_schema); + crm_xml_add(up, PCMK__XA_CIB_DELEGATED_FROM, host); + crm_xml_add(up, PCMK__XA_CIB_CLIENTID, client_id); + crm_xml_add(up, PCMK__XA_CIB_CALLOPT, call_opts); + crm_xml_add(up, PCMK__XA_CIB_CALLID, call_id); if (cib_legacy_mode() && based_is_primary) { rc = cib_process_upgrade( op, options, section, up, input, existing_cib, result_cib, answer); } else { - send_cluster_message(NULL, crm_msg_cib, up, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_cib, up); } free_xml(up); @@ -243,25 +252,27 @@ cib_process_upgrade_server(const char *op, int options, const char *section, xml if (rc != pcmk_ok) { // Notify originating peer so it can notify its local clients - crm_node_t *origin = pcmk__search_cluster_node_cache(0, host, NULL); + crm_node_t *origin = NULL; + + origin = pcmk__search_node_caches(0, host, + pcmk__node_search_cluster_member); crm_info("Rejecting upgrade request from %s: %s " CRM_XS " rc=%d peer=%s", host, pcmk_strerror(rc), rc, (origin? 
origin->uname : "lost")); if (origin) { - xmlNode *up = create_xml_node(NULL, __func__); - - crm_xml_add(up, F_TYPE, "cib"); - crm_xml_add(up, F_CIB_OPERATION, PCMK__CIB_REQUEST_UPGRADE); - crm_xml_add(up, F_CIB_DELEGATED, host); - crm_xml_add(up, F_CIB_ISREPLY, host); - crm_xml_add(up, F_CIB_CLIENTID, client_id); - crm_xml_add(up, F_CIB_CALLOPTS, call_opts); - crm_xml_add(up, F_CIB_CALLID, call_id); - crm_xml_add_int(up, F_CIB_UPGRADE_RC, rc); - if (send_cluster_message(origin, crm_msg_cib, up, TRUE) - == FALSE) { + xmlNode *up = pcmk__xe_create(NULL, __func__); + + crm_xml_add(up, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(up, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_UPGRADE); + crm_xml_add(up, PCMK__XA_CIB_DELEGATED_FROM, host); + crm_xml_add(up, PCMK__XA_CIB_ISREPLYTO, host); + crm_xml_add(up, PCMK__XA_CIB_CLIENTID, client_id); + crm_xml_add(up, PCMK__XA_CIB_CALLOPT, call_opts); + crm_xml_add(up, PCMK__XA_CIB_CALLID, call_id); + crm_xml_add_int(up, PCMK__XA_CIB_UPGRADE_RC, rc); + if (!pcmk__cluster_send_message(origin, crm_msg_cib, up)) { crm_warn("Could not send CIB upgrade result to %s", host); } free_xml(up); @@ -351,7 +362,7 @@ cib_process_replace_svr(const char *op, int options, const char *section, xmlNod int rc = cib_process_replace(op, options, section, req, input, existing_cib, result_cib, answer); - if ((rc == pcmk_ok) && pcmk__xe_is(input, XML_TAG_CIB)) { + if ((rc == pcmk_ok) && pcmk__xe_is(input, PCMK_XE_CIB)) { sync_in_progress = 0; } return rc; @@ -370,32 +381,26 @@ static xmlNode * cib_msg_copy(xmlNode *msg) { static const char *field_list[] = { - F_XML_TAGNAME, - F_TYPE, - F_CIB_CLIENTID, - F_CIB_CALLOPTS, - F_CIB_CALLID, - F_CIB_OPERATION, - F_CIB_ISREPLY, - F_CIB_SECTION, - F_CIB_HOST, - F_CIB_RC, - F_CIB_DELEGATED, - F_CIB_OBJID, - F_CIB_OBJTYPE, - F_CIB_EXISTING, - F_CIB_SEENCOUNT, - F_CIB_TIMEOUT, - F_CIB_GLOBAL_UPDATE, - F_CIB_CLIENTNAME, - F_CIB_USER, - F_CIB_NOTIFY_TYPE, - F_CIB_NOTIFY_ACTIVATE + PCMK__XA_T, + PCMK__XA_CIB_CLIENTID, + PCMK__XA_CIB_CALLOPT, + PCMK__XA_CIB_CALLID, + PCMK__XA_CIB_OP, + PCMK__XA_CIB_ISREPLYTO, + PCMK__XA_CIB_SECTION, + PCMK__XA_CIB_HOST, + PCMK__XA_CIB_RC, + PCMK__XA_CIB_DELEGATED_FROM, + PCMK__XA_CIB_OBJECT, + PCMK__XA_CIB_OBJECT_TYPE, + PCMK__XA_CIB_UPDATE, + PCMK__XA_CIB_CLIENTNAME, + PCMK__XA_CIB_USER, + PCMK__XA_CIB_NOTIFY_TYPE, + PCMK__XA_CIB_NOTIFY_ACTIVATE, }; - xmlNode *copy = create_xml_node(NULL, "copy"); - - CRM_ASSERT(copy != NULL); + xmlNode *copy = pcmk__xe_create(NULL, PCMK__XE_COPY); for (int lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) { const char *field = field_list[lpc]; @@ -414,10 +419,11 @@ sync_our_cib(xmlNode * request, gboolean all) { int result = pcmk_ok; char *digest = NULL; - const char *host = crm_element_value(request, F_ORIG); - const char *op = crm_element_value(request, F_CIB_OPERATION); - + const char *host = crm_element_value(request, PCMK__XA_SRC); + const char *op = crm_element_value(request, PCMK__XA_CIB_OP); + crm_node_t *peer = NULL; xmlNode *replace_request = NULL; + xmlNode *wrapper = NULL; CRM_CHECK(the_cib != NULL, return -EINVAL); CRM_CHECK(all || (host != NULL), return -EINVAL); @@ -427,24 +433,30 @@ sync_our_cib(xmlNode * request, gboolean all) replace_request = cib_msg_copy(request); if (host != NULL) { - crm_xml_add(replace_request, F_CIB_ISREPLY, host); + crm_xml_add(replace_request, PCMK__XA_CIB_ISREPLYTO, host); } if (all) { - xml_remove_prop(replace_request, F_CIB_HOST); + pcmk__xe_remove_attr(replace_request, PCMK__XA_CIB_HOST); } - crm_xml_add(replace_request, F_CIB_OPERATION, 
PCMK__CIB_REQUEST_REPLACE); - crm_xml_add(replace_request, "original_" F_CIB_OPERATION, op); - pcmk__xe_set_bool_attr(replace_request, F_CIB_GLOBAL_UPDATE, true); + crm_xml_add(replace_request, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_REPLACE); + + // @TODO Keep for tracing, or drop? + crm_xml_add(replace_request, PCMK__XA_ORIGINAL_CIB_OP, op); + + pcmk__xe_set_bool_attr(replace_request, PCMK__XA_CIB_UPDATE, true); - crm_xml_add(replace_request, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); + crm_xml_add(replace_request, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); digest = calculate_xml_versioned_digest(the_cib, FALSE, TRUE, CRM_FEATURE_SET); - crm_xml_add(replace_request, XML_ATTR_DIGEST, digest); + crm_xml_add(replace_request, PCMK__XA_DIGEST, digest); - add_message_xml(replace_request, F_CIB_CALLDATA, the_cib); + wrapper = pcmk__xe_create(replace_request, PCMK__XE_CIB_CALLDATA); + pcmk__xml_copy(wrapper, the_cib); - if (send_cluster_message - (all ? NULL : crm_get_peer(0, host), crm_msg_cib, replace_request, FALSE) == FALSE) { + if (!all) { + peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); + } + if (!pcmk__cluster_send_message(peer, crm_msg_cib, replace_request)) { result = -ENOTCONN; } free_xml(replace_request); @@ -463,8 +475,8 @@ cib_process_commit_transaction(const char *op, int options, const char *section, * On failure, our caller will free *result_cib. */ int rc = pcmk_rc_ok; - const char *client_id = crm_element_value(req, F_CIB_CLIENTID); - const char *origin = crm_element_value(req, F_ORIG); + const char *client_id = crm_element_value(req, PCMK__XA_CIB_CLIENTID); + const char *origin = crm_element_value(req, PCMK__XA_SRC); pcmk__client_t *client = pcmk__find_client_by_id(client_id); rc = based_commit_transaction(input, client, origin, result_cib); @@ -478,3 +490,49 @@ cib_process_commit_transaction(const char *op, int options, const char *section, } return pcmk_rc2legacy(rc); } + +int +cib_process_schemas(const char *op, int options, const char *section, xmlNode *req, + xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, + xmlNode **answer) +{ + xmlNode *wrapper = NULL; + xmlNode *data = NULL; + + const char *after_ver = NULL; + GList *schemas = NULL; + GList *already_included = NULL; + + *answer = pcmk__xe_create(NULL, PCMK__XA_SCHEMAS); + + wrapper = pcmk__xe_first_child(req, PCMK__XE_CIB_CALLDATA, NULL, NULL); + data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + if (data == NULL) { + crm_warn("No data specified in request"); + return -EPROTO; + } + + after_ver = crm_element_value(data, PCMK_XA_VERSION); + if (after_ver == NULL) { + crm_warn("No version specified in request"); + return -EPROTO; + } + + /* The client requested all schemas after the latest one we know about, which + * means the client is fully up-to-date. Return a properly formatted reply + * with no schemas. 
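
The new cib_process_schemas() handler introduced in this patch answers with every schema file newer than the version the client reports, and a fully up-to-date client gets a well-formed but empty reply. A toy GLib model of that selection follows; the schema names and the "everything after the match is newer" ordering are simplifying assumptions, since the real daemon delegates to pcmk__schema_files_later_than():

    #include <glib.h>
    #include <stdio.h>
    #include <string.h>

    /* Collect every schema name that sorts after the client's version */
    static GList *
    schemas_later_than(const char *const *known, const char *after)
    {
        GList *result = NULL;
        gboolean seen = FALSE;

        for (int i = 0; known[i] != NULL; i++) {
            if (seen) {
                result = g_list_append(result, g_strdup(known[i]));
            } else if (strcmp(known[i], after) == 0) {
                seen = TRUE;  // everything after this entry is newer
            }
        }
        return result;
    }

    int
    main(void)
    {
        const char *const known[] = { "pacemaker-3.8", "pacemaker-3.9",
                                      "pacemaker-3.10", NULL };
        GList *newer = schemas_later_than(known, "pacemaker-3.8");

        for (GList *iter = newer; iter != NULL; iter = iter->next) {
            printf("would send %s\n", (const char *) iter->data);
        }

        // A client already at the newest known schema gets an empty list,
        // so the reply is properly formatted but carries no schemas
        g_list_free_full(newer, g_free);
        return 0;
    }
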
+ */ + if (pcmk__str_eq(after_ver, pcmk__highest_schema_name(), pcmk__str_none)) { + return pcmk_ok; + } + + schemas = pcmk__schema_files_later_than(after_ver); + + for (GList *iter = schemas; iter != NULL; iter = iter->next) { + pcmk__build_schema_xml_node(*answer, iter->data, &already_included); + } + + g_list_free_full(schemas, free); + g_list_free_full(already_included, free); + return pcmk_ok; +} diff --git a/daemons/based/based_notify.c b/daemons/based/based_notify.c index 00a4c54..5160645 100644 --- a/daemons/based/based_notify.c +++ b/daemons/based/based_notify.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -26,7 +26,6 @@ #include #include -#include #include #include @@ -53,25 +52,27 @@ cib_notify_send_one(gpointer key, gpointer value, gpointer user_data) return; } - type = crm_element_value(update->msg, F_SUBTYPE); + type = crm_element_value(update->msg, PCMK__XA_SUBT); CRM_LOG_ASSERT(type != NULL); if (pcmk_is_set(client->flags, cib_notify_diff) - && pcmk__str_eq(type, T_CIB_DIFF_NOTIFY, pcmk__str_casei)) { + && pcmk__str_eq(type, PCMK__VALUE_CIB_DIFF_NOTIFY, pcmk__str_none)) { do_send = TRUE; } else if (pcmk_is_set(client->flags, cib_notify_confirm) - && pcmk__str_eq(type, T_CIB_UPDATE_CONFIRM, pcmk__str_casei)) { + && pcmk__str_eq(type, PCMK__VALUE_CIB_UPDATE_CONFIRMATION, + pcmk__str_none)) { do_send = TRUE; } else if (pcmk_is_set(client->flags, cib_notify_pre) - && pcmk__str_eq(type, T_CIB_PRE_NOTIFY, pcmk__str_casei)) { + && pcmk__str_eq(type, PCMK__VALUE_CIB_PRE_NOTIFY, + pcmk__str_none)) { do_send = TRUE; } else if (pcmk_is_set(client->flags, cib_notify_post) - && pcmk__str_eq(type, T_CIB_POST_NOTIFY, pcmk__str_casei)) { - + && pcmk__str_eq(type, PCMK__VALUE_CIB_POST_NOTIFY, + pcmk__str_none)) { do_send = TRUE; } @@ -125,15 +126,14 @@ cib_notify_send(const xmlNode *xml) } static void -attach_cib_generation(xmlNode *msg, const char *field, xmlNode *a_cib) +attach_cib_generation(xmlNode *msg) { - xmlNode *generation = create_xml_node(NULL, XML_CIB_TAG_GENERATION_TUPPLE); + xmlNode *wrapper = pcmk__xe_create(msg, PCMK__XE_CIB_GENERATION); + xmlNode *generation = pcmk__xe_create(wrapper, PCMK__XE_GENERATION_TUPLE); - if (a_cib != NULL) { - copy_in_properties(generation, a_cib); + if (the_cib != NULL) { + pcmk__xe_copy_attrs(generation, the_cib, pcmk__xaf_none); } - add_message_xml(msg, field, generation); - free_xml(generation); } void @@ -152,6 +152,7 @@ cib_diff_notify(const char *op, int result, const char *call_id, uint8_t log_level = LOG_TRACE; xmlNode *update_msg = NULL; + xmlNode *wrapper = NULL; const char *type = NULL; if (diff == NULL) { @@ -191,17 +192,18 @@ cib_diff_notify(const char *op, int result, const char *call_id, pcmk__s(origin, "unspecified peer"), pcmk_strerror(result)); } - update_msg = create_xml_node(NULL, "notify"); + update_msg = pcmk__xe_create(NULL, PCMK__XE_NOTIFY); - crm_xml_add(update_msg, F_TYPE, T_CIB_NOTIFY); - crm_xml_add(update_msg, F_SUBTYPE, T_CIB_DIFF_NOTIFY); - crm_xml_add(update_msg, F_CIB_OPERATION, op); - crm_xml_add(update_msg, F_CIB_CLIENTID, client_id); - crm_xml_add(update_msg, F_CIB_CLIENTNAME, client_name); - crm_xml_add(update_msg, F_CIB_CALLID, call_id); - crm_xml_add(update_msg, F_ORIG, origin); - crm_xml_add_int(update_msg, F_CIB_RC, result); + crm_xml_add(update_msg, PCMK__XA_T, PCMK__VALUE_CIB_NOTIFY); + crm_xml_add(update_msg, PCMK__XA_SUBT, 
PCMK__VALUE_CIB_DIFF_NOTIFY); + crm_xml_add(update_msg, PCMK__XA_CIB_OP, op); + crm_xml_add(update_msg, PCMK__XA_CIB_CLIENTID, client_id); + crm_xml_add(update_msg, PCMK__XA_CIB_CLIENTNAME, client_name); + crm_xml_add(update_msg, PCMK__XA_CIB_CALLID, call_id); + crm_xml_add(update_msg, PCMK__XA_SRC, origin); + crm_xml_add_int(update_msg, PCMK__XA_CIB_RC, result); + // @COMPAT Unused internally, drop at 3.0.0 if (update != NULL) { type = (const char *) update->name; crm_trace("Setting type to update->name: %s", type); @@ -209,14 +211,20 @@ cib_diff_notify(const char *op, int result, const char *call_id, type = (const char *) diff->name; crm_trace("Setting type to new_obj->name: %s", type); } - crm_xml_add(update_msg, F_CIB_OBJID, ID(diff)); - crm_xml_add(update_msg, F_CIB_OBJTYPE, type); - attach_cib_generation(update_msg, "cib_generation", the_cib); + // @COMPAT Unused internally, drop at 3.0.0 + crm_xml_add(update_msg, PCMK__XA_CIB_OBJECT, pcmk__xe_id(diff)); + crm_xml_add(update_msg, PCMK__XA_CIB_OBJECT_TYPE, type); + attach_cib_generation(update_msg); + + // @COMPAT Unused internally, drop at 3.0.0 if (update != NULL) { - add_message_xml(update_msg, F_CIB_UPDATE, update); + wrapper = pcmk__xe_create(update_msg, PCMK__XE_CIB_UPDATE); + pcmk__xml_copy(wrapper, update); } - add_message_xml(update_msg, F_CIB_UPDATE_RESULT, diff); + + wrapper = pcmk__xe_create(update_msg, PCMK__XE_CIB_UPDATE_RESULT); + pcmk__xml_copy(wrapper, diff); crm_log_xml_trace(update_msg, "diff-notify"); cib_notify_send(update_msg); diff --git a/daemons/based/based_operation.c b/daemons/based/based_operation.c index 736d425..8dd07af 100644 --- a/daemons/based/based_operation.c +++ b/daemons/based/based_operation.c @@ -35,6 +35,7 @@ static const cib__op_fn_t cib_op_functions[] = { [cib__op_sync_all] = cib_process_sync, [cib__op_sync_one] = cib_process_sync_one, [cib__op_upgrade] = cib_process_upgrade_server, + [cib__op_schemas] = cib_process_schemas, }; /*! diff --git a/daemons/based/based_remote.c b/daemons/based/based_remote.c index 4aa41fa..b3cb655 100644 --- a/daemons/based/based_remote.c +++ b/daemons/based/based_remote.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
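
The cib_op_functions[] hunk above is a one-line change because the table uses C99 designated initializers: the new cib__op_schemas handler simply lands at its enum slot. A self-contained model of the idiom, with invented operation and handler names:

    #include <stdio.h>

    enum op_type { op_query, op_modify, op_schemas, op_max };

    typedef int (*op_fn_t)(const char *input);

    static int do_query(const char *in)   { printf("query %s\n", in); return 0; }
    static int do_modify(const char *in)  { printf("modify %s\n", in); return 0; }
    static int do_schemas(const char *in) { printf("schemas %s\n", in); return 0; }

    /* Each handler lands at its enum index; initializer order is irrelevant */
    static const op_fn_t op_functions[op_max] = {
        [op_query]   = do_query,
        [op_modify]  = do_modify,
        [op_schemas] = do_schemas,  // a newly added operation slots in here
    };

    int
    main(void)
    {
        enum op_type op = op_schemas;

        // Slots never assigned stay NULL, so missing handlers are detectable
        return (op_functions[op] != NULL)? op_functions[op]("example") : 1;
    }
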
* @@ -27,7 +27,6 @@ #include #include -#include #include #include #include @@ -224,20 +223,20 @@ cib_remote_auth(xmlNode * login) return FALSE; } - if (!pcmk__xe_is(login, T_CIB_COMMAND)) { + if (!pcmk__xe_is(login, PCMK__XE_CIB_COMMAND)) { crm_err("Unrecognizable message from remote client"); crm_log_xml_info(login, "bad"); return FALSE; } - tmp = crm_element_value(login, "op"); + tmp = crm_element_value(login, PCMK_XA_OP); if (!pcmk__str_eq(tmp, "authenticate", pcmk__str_casei)) { crm_err("Wrong operation: %s", tmp); return FALSE; } - user = crm_element_value(login, "user"); - pass = crm_element_value(login, "password"); + user = crm_element_value(login, PCMK_XA_USER); + pass = crm_element_value(login, PCMK__XA_PASSWORD); if (!user || !pass) { crm_err("missing auth credentials"); @@ -317,7 +316,7 @@ cib_remote_listen(gpointer data) num_clients++; new_client = pcmk__new_unauth_client(NULL); - new_client->remote = calloc(1, sizeof(pcmk__remote_t)); + new_client->remote = pcmk__assert_alloc(1, sizeof(pcmk__remote_t)); if (ssock == remote_tls_fd) { #ifdef HAVE_GNUTLS_GNUTLS_H @@ -411,42 +410,35 @@ cib_remote_connection_destroy(gpointer user_data) static void cib_handle_remote_msg(pcmk__client_t *client, xmlNode *command) { - const char *value = NULL; - - if (!pcmk__xe_is(command, T_CIB_COMMAND)) { + if (!pcmk__xe_is(command, PCMK__XE_CIB_COMMAND)) { crm_log_xml_trace(command, "bad"); return; } if (client->name == NULL) { - value = crm_element_value(command, F_CLIENTNAME); - if (value == NULL) { - client->name = strdup(client->id); - } else { - client->name = strdup(value); - } + client->name = pcmk__str_copy(client->id); } /* unset dangerous options */ - xml_remove_prop(command, F_ORIG); - xml_remove_prop(command, F_CIB_HOST); - xml_remove_prop(command, F_CIB_GLOBAL_UPDATE); + pcmk__xe_remove_attr(command, PCMK__XA_SRC); + pcmk__xe_remove_attr(command, PCMK__XA_CIB_HOST); + pcmk__xe_remove_attr(command, PCMK__XA_CIB_UPDATE); - crm_xml_add(command, F_TYPE, T_CIB); - crm_xml_add(command, F_CIB_CLIENTID, client->id); - crm_xml_add(command, F_CIB_CLIENTNAME, client->name); - crm_xml_add(command, F_CIB_USER, client->user); + crm_xml_add(command, PCMK__XA_T, PCMK__VALUE_CIB); + crm_xml_add(command, PCMK__XA_CIB_CLIENTID, client->id); + crm_xml_add(command, PCMK__XA_CIB_CLIENTNAME, client->name); + crm_xml_add(command, PCMK__XA_CIB_USER, client->user); - if (crm_element_value(command, F_CIB_CALLID) == NULL) { + if (crm_element_value(command, PCMK__XA_CIB_CALLID) == NULL) { char *call_uuid = crm_generate_uuid(); /* fix the command */ - crm_xml_add(command, F_CIB_CALLID, call_uuid); + crm_xml_add(command, PCMK__XA_CIB_CALLID, call_uuid); free(call_uuid); } - if (crm_element_value(command, F_CIB_CALLOPTS) == NULL) { - crm_xml_add_int(command, F_CIB_CALLOPTS, 0); + if (crm_element_value(command, PCMK__XA_CIB_CALLOPT) == NULL) { + crm_xml_add_int(command, PCMK__XA_CIB_CALLOPT, 0); } crm_log_xml_trace(command, "Remote command: "); @@ -515,17 +507,17 @@ cib_remote_msg(gpointer data) pcmk__set_client_flags(client, pcmk__client_authenticated); g_source_remove(client->remote->auth_timeout); client->remote->auth_timeout = 0; - client->name = crm_element_value_copy(command, "name"); + client->name = crm_element_value_copy(command, PCMK_XA_NAME); - user = crm_element_value(command, "user"); + user = crm_element_value(command, PCMK_XA_USER); if (user) { - client->user = strdup(user); + client->user = pcmk__str_copy(user); } /* send ACK */ - reg = create_xml_node(NULL, "cib_result"); - crm_xml_add(reg, 
F_CIB_OPERATION, CRM_OP_REGISTER); - crm_xml_add(reg, F_CIB_CLIENTID, client->id); + reg = pcmk__xe_create(NULL, PCMK__XE_CIB_RESULT); + crm_xml_add(reg, PCMK__XA_CIB_OP, CRM_OP_REGISTER); + crm_xml_add(reg, PCMK__XA_CIB_CLIENTID, client->id); pcmk__remote_send_xml(client->remote, reg); free_xml(reg); free_xml(command); @@ -559,8 +551,7 @@ construct_pam_passwd(int num_msg, const struct pam_message **msg, CRM_CHECK(data, return PAM_CONV_ERR); CRM_CHECK(num_msg == 1, return PAM_CONV_ERR); /* We only want to handle one message */ - reply = calloc(1, sizeof(struct pam_response)); - CRM_ASSERT(reply != NULL); + reply = pcmk__assert_alloc(1, sizeof(struct pam_response)); for (count = 0; count < num_msg; ++count) { switch (msg[count]->msg_style) { @@ -634,7 +625,7 @@ authenticate_user(const char *user, const char *passwd) } p_conv.conv = construct_pam_passwd; - p_conv.appdata_ptr = strdup(passwd); + p_conv.appdata_ptr = pcmk__str_copy(passwd); rc = pam_start(pam_name, user, &p_conv, &pam_h); if (rc != PAM_SUCCESS) { diff --git a/daemons/based/based_transaction.c b/daemons/based/based_transaction.c index 89aea2e..39b3439 100644 --- a/daemons/based/based_transaction.c +++ b/daemons/based/based_transaction.c @@ -1,5 +1,5 @@ /* - * Copyright 2023 the Pacemaker project contributors + * Copyright 2023-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -28,21 +28,15 @@ char * based_transaction_source_str(const pcmk__client_t *client, const char *origin) { - char *source = NULL; - if (client != NULL) { - source = crm_strdup_printf("client %s (%s)%s%s", - pcmk__client_name(client), - pcmk__s(client->id, "unidentified"), - ((origin != NULL)? " on " : ""), - pcmk__s(origin, "")); - + return crm_strdup_printf("client %s (%s)%s%s", + pcmk__client_name(client), + pcmk__s(client->id, "unidentified"), + ((origin != NULL)? " on " : ""), + pcmk__s(origin, "")); } else { - source = strdup((origin != NULL)? origin : "unknown source"); + return pcmk__str_copy(pcmk__s(origin, "unknown source")); } - - CRM_ASSERT(source != NULL); - return source; } /*! @@ -61,11 +55,13 @@ static int process_transaction_requests(xmlNodePtr transaction, const pcmk__client_t *client, const char *source) { - for (xmlNodePtr request = first_named_child(transaction, T_CIB_COMMAND); - request != NULL; request = crm_next_same_xml(request)) { + for (xmlNode *request = pcmk__xe_first_child(transaction, + PCMK__XE_CIB_COMMAND, NULL, + NULL); + request != NULL; request = pcmk__xe_next_same(request)) { - const char *op = crm_element_value(request, F_CIB_OPERATION); - const char *host = crm_element_value(request, F_CIB_HOST); + const char *op = crm_element_value(request, PCMK__XA_CIB_OP); + const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const cib__operation_t *operation = NULL; int rc = cib__get_operation(op, &operation); @@ -127,7 +123,7 @@ based_commit_transaction(xmlNodePtr transaction, const pcmk__client_t *client, CRM_ASSERT(result_cib != NULL); - CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION), + CRM_CHECK(pcmk__xe_is(transaction, PCMK__XE_CIB_TRANSACTION), return pcmk_rc_no_transaction); /* *result_cib should be a copy of the_cib (created by cib_perform_op()). If @@ -138,7 +134,7 @@ based_commit_transaction(xmlNodePtr transaction, const pcmk__client_t *client, * * cib_perform_op() will infer changes for the commit request at the end. 
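
The based_transaction_source_str() change above replaces the assign-then-assert shape with a direct return from each branch, leaning on the fact that the printf-style duplicator aborts rather than returning NULL. A standalone rendering of the same shape using GLib (client_name, client_id, and origin are stand-ins for the daemon's client structure):

    #include <glib.h>
    #include <stdio.h>

    static char *
    transaction_source_str(const char *client_name, const char *client_id,
                           const char *origin)
    {
        if (client_name != NULL) {
            // g_strdup_printf() aborts on allocation failure instead of
            // returning NULL, so no follow-up assertion is needed
            return g_strdup_printf("client %s (%s)%s%s", client_name,
                                   ((client_id != NULL)? client_id : "unidentified"),
                                   ((origin != NULL)? " on " : ""),
                                   ((origin != NULL)? origin : ""));
        }
        return g_strdup((origin != NULL)? origin : "unknown source");
    }

    int
    main(void)
    {
        char *source = transaction_source_str("cibadmin", "abc123", "node1");

        printf("%s\n", source);  // client cibadmin (abc123) on node1
        g_free(source);
        return 0;
    }
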
*/ CRM_CHECK((*result_cib != NULL) && (*result_cib != the_cib), - *result_cib = copy_xml(the_cib)); + *result_cib = pcmk__xml_copy(NULL, the_cib)); source = based_transaction_source_str(client, origin); crm_trace("Committing transaction for %s to working CIB", source); diff --git a/daemons/based/pacemaker-based.c b/daemons/based/pacemaker-based.c index 5dd7938..137930b 100644 --- a/daemons/based/pacemaker-based.c +++ b/daemons/based/pacemaker-based.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -21,7 +21,6 @@ #include #include -#include #include #include #include @@ -36,7 +35,7 @@ extern int init_remote_listener(int port, gboolean encrypted); gboolean cib_shutdown_flag = FALSE; int cib_status = pcmk_ok; -crm_cluster_t *crm_cluster = NULL; +pcmk_cluster_t *crm_cluster = NULL; GMainLoop *mainloop = NULL; gchar *cib_root = NULL; @@ -126,6 +125,19 @@ setup_stand_alone(GError **error) return pcmk_rc_ok; } +/* @COMPAT Deprecated since 2.1.8. Use pcmk_list_cluster_options() or + * crm_attribute --list-options=cluster instead of querying daemon metadata. + */ +static int +based_metadata(pcmk__output_t *out) +{ + return pcmk__daemon_metadata(out, "pacemaker-based", + "Cluster Information Base manager options", + "Cluster options used by Pacemaker's Cluster " + "Information Base manager", + pcmk__opt_based); +} + static GOptionEntry entries[] = { { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone, "(Advanced use only) Run in stand-alone mode", NULL }, @@ -154,8 +166,7 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; - context = pcmk__build_arg_context(args, "text (default), xml", group, - "[metadata]"); + context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, entries); return context; } @@ -204,7 +215,13 @@ main(int argc, char **argv) if ((g_strv_length(processed_args) >= 2) && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) { - cib_metadata(); + + rc = based_metadata(out); + if (rc != pcmk_rc_ok) { + exit_code = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Unable to display metadata: %s", pcmk_rc_str(rc)); + } goto done; } @@ -254,7 +271,7 @@ main(int argc, char **argv) goto done; } - crm_peer_init(); + pcmk__cluster_init_node_caches(); // Read initial CIB, connect to cluster, and start IPC servers cib_init(); @@ -267,14 +284,14 @@ main(int argc, char **argv) /* If main loop returned, clean up and exit. We disconnect in case * terminate_cib() was called with fast=-1. 
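
The main() hunk above turns the bare metadata argument into a call to based_metadata(), failing hard when the metadata cannot be rendered. Reduced to plain C, the dispatch looks like the following sketch, where print_metadata() is a hypothetical stand-in for pcmk__daemon_metadata() and the output is placeholder text:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for pcmk__daemon_metadata(); returns 0 on success */
    static int
    print_metadata(void)
    {
        puts("<resource-agent name=\"pacemaker-based\"/>");  // placeholder
        return 0;
    }

    int
    main(int argc, char **argv)
    {
        if ((argc >= 2) && (strcmp(argv[1], "metadata") == 0)) {
            if (print_metadata() != 0) {
                fprintf(stderr, "Unable to display metadata\n");
                return 1;  // the daemon itself exits with CRM_EX_FATAL here
            }
            return 0;
        }
        /* ... normal daemon startup continues here ... */
        return 0;
    }
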
*/ - crm_cluster_disconnect(crm_cluster); + pcmk_cluster_disconnect(crm_cluster); pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); done: g_strfreev(processed_args); pcmk__free_arg_context(context); - crm_peer_destroy(); + pcmk__cluster_destroy_node_caches(); if (local_notify_queue != NULL) { g_hash_table_destroy(local_notify_queue); @@ -306,20 +323,19 @@ cib_cs_dispatch(cpg_handle_t handle, uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; - char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); + char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { - xml = string2xml(data); + xml = pcmk__xml_parse(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } - crm_xml_add(xml, F_ORIG, from); - /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */ + crm_xml_add(xml, PCMK__XA_SRC, from); cib_peer_callback(xml, NULL); } @@ -359,7 +375,7 @@ cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const voi case crm_status_uname: case crm_status_nstate: - if (cib_shutdown_flag && (crm_active_peers() < 2) + if (cib_shutdown_flag && (pcmk__cluster_num_active_nodes() < 2) && (pcmk__ipc_client_count() == 0)) { crm_info("No more peers"); @@ -375,10 +391,10 @@ cib_init(void) crm_cluster = pcmk_cluster_new(); #if SUPPORT_COROSYNC - if (is_corosync_cluster()) { - crm_cluster->destroy = cib_cs_destroy; - crm_cluster->cpg.cpg_deliver_fn = cib_cs_dispatch; - crm_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { + pcmk_cluster_set_destroy_fn(crm_cluster, cib_cs_destroy); + pcmk_cpg_set_deliver_fn(crm_cluster, cib_cs_dispatch); + pcmk_cpg_set_confchg_fn(crm_cluster, pcmk__cpg_confchg_cb); } #endif // SUPPORT_COROSYNC @@ -390,9 +406,9 @@ cib_init(void) } if (!stand_alone) { - crm_set_status_callback(&cib_peer_update_callback); + pcmk__cluster_set_status_callback(&cib_peer_update_callback); - if (!crm_cluster_connect(crm_cluster)) { + if (pcmk_cluster_connect(crm_cluster) != pcmk_rc_ok) { crm_crit("Cannot sign in to the cluster... 
terminating"); crm_exit(CRM_EX_FATAL); } @@ -419,12 +435,13 @@ startCib(const char *filename) cib_read_config(config_hash, cib); - pcmk__scan_port(crm_element_value(cib, "remote-tls-port"), &port); + pcmk__scan_port(crm_element_value(cib, PCMK_XA_REMOTE_TLS_PORT), &port); if (port >= 0) { remote_tls_fd = init_remote_listener(port, TRUE); } - pcmk__scan_port(crm_element_value(cib, "remote-clear-port"), &port); + pcmk__scan_port(crm_element_value(cib, PCMK_XA_REMOTE_CLEAR_PORT), + &port); if (port >= 0) { remote_fd = init_remote_listener(port, FALSE); } diff --git a/daemons/based/pacemaker-based.h b/daemons/based/pacemaker-based.h index 33c7642..16b0e78 100644 --- a/daemons/based/pacemaker-based.h +++ b/daemons/based/pacemaker-based.h @@ -56,7 +56,7 @@ extern crm_trigger_t *cib_writer; extern gboolean cib_writes_enabled; extern GMainLoop *mainloop; -extern crm_cluster_t *crm_cluster; +extern pcmk_cluster_t *crm_cluster; extern GHashTable *local_notify_queue; extern gboolean legacy_mode; extern gboolean stand_alone; @@ -122,6 +122,10 @@ int cib_process_commit_transaction(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); +int cib_process_schemas(const char *op, int options, const char *section, + xmlNode *req, xmlNode *input, xmlNode *existing_cib, + xmlNode **result_cib, xmlNode **answer); + void send_sync_request(const char *host); int sync_our_cib(xmlNode *request, gboolean all); diff --git a/daemons/controld/controld_attrd.c b/daemons/controld/controld_attrd.c index 923abb9..f728bef 100644 --- a/daemons/controld/controld_attrd.c +++ b/daemons/controld/controld_attrd.c @@ -1,5 +1,5 @@ /* - * Copyright 2006-2022 the Pacemaker project contributors + * Copyright 2006-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -10,10 +10,10 @@ #include #include -#include +#include #include #include -#include +#include #include @@ -117,7 +117,7 @@ update_attrd_remote_node_removed(const char *host, const char *user_name) if (rc == pcmk_rc_ok) { crm_trace("Asking attribute manager to purge Pacemaker Remote node %s", host); - rc = pcmk__attrd_api_purge(attrd_api, host); + rc = pcmk__attrd_api_purge(attrd_api, host, true); } if (rc != pcmk_rc_ok) { crm_err("Could not purge Pacemaker Remote node %s " @@ -136,25 +136,23 @@ update_attrd_clear_failures(const char *host, const char *rsc, const char *op, rc = pcmk_new_ipc_api(&attrd_api, pcmk_ipc_attrd); } if (rc == pcmk_rc_ok) { - const char *op_desc = pcmk__s(op, "operations"); - const char *interval_desc = "all"; uint32_t attrd_opts = pcmk__node_attr_none; - if (op != NULL) { - interval_desc = pcmk__s(interval_spec, "nonrecurring"); - } if (is_remote_node) { pcmk__set_node_attr_flags(attrd_opts, pcmk__node_attr_remote); } - crm_info("Asking attribute manager to clear failure of %s %s for %s " - "on %s node %s", interval_desc, op_desc, rsc, - node_type(is_remote_node), host); rc = pcmk__attrd_api_clear_failures(attrd_api, host, rsc, op, interval_spec, NULL, attrd_opts); } if (rc != pcmk_rc_ok) { - crm_err("Could not clear failure attributes for %s on %s node %s%s: %s " - CRM_XS " rc=%d", pcmk__s(rsc, "all resources"), - node_type(is_remote_node), host, when(), pcmk_rc_str(rc), rc); + const char *interval_desc = "all"; + + if (op != NULL) { + interval_desc = pcmk__s(interval_spec, "nonrecurring"); + } + crm_err("Could not clear failure of %s %s for %s on %s node %s%s: %s " + CRM_XS " rc=%d", interval_desc, pcmk__s(op, "operations"), + pcmk__s(rsc, "all resources"), node_type(is_remote_node), host, + when(), pcmk_rc_str(rc), rc); } } diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c index 7078739..16e6424 100644 --- a/daemons/controld/controld_callbacks.c +++ b/daemons/controld/controld_callbacks.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -27,15 +26,15 @@ void crmd_ha_msg_filter(xmlNode * msg) { if (AM_I_DC) { - const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); + const char *sys_from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); if (pcmk__str_eq(sys_from, CRM_SYSTEM_DC, pcmk__str_casei)) { - const char *from = crm_element_value(msg, F_ORIG); + const char *from = crm_element_value(msg, PCMK__XA_SRC); if (!pcmk__str_eq(from, controld_globals.our_nodename, pcmk__str_casei)) { int level = LOG_INFO; - const char *op = crm_element_value(msg, F_CRM_TASK); + const char *op = crm_element_value(msg, PCMK__XA_CRM_TASK); /* make sure the election happens NOW */ if (controld_globals.fsa_state != S_ELECTION) { @@ -53,7 +52,7 @@ crmd_ha_msg_filter(xmlNode * msg) } } else { - const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); + const char *sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); if (pcmk__str_eq(sys_to, CRM_SYSTEM_DC, pcmk__str_casei)) { return; @@ -84,7 +83,7 @@ node_alive(const crm_node_t *node) // Pacemaker Remote nodes can't be partially alive return pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei) ? 
1: -1; - } else if (crm_is_peer_active(node)) { + } else if (pcmk__cluster_is_node_active(node)) { // Completely up cluster node: both cluster member and peer return 1; @@ -128,7 +127,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d xmlNode *query = create_request(CRM_OP_HELLO, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); crm_debug("Sending hello to node %u so that it learns our node name", node->id); - send_cluster_message(node, crm_msg_crmd, query, FALSE); + pcmk__cluster_send_message(node, crm_msg_crmd, query); free_xml(query); } @@ -178,7 +177,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d const char *dc_s = controld_globals.dc_name; if ((dc_s == NULL) && AM_I_DC) { - dc_s = "true"; + dc_s = PCMK_VALUE_TRUE; } crm_info("Node %s is %s a peer " CRM_XS @@ -222,7 +221,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d } else if (pcmk__str_eq(node->uname, controld_globals.dc_name, pcmk__str_casei) - && !crm_is_peer_active(node)) { + && !pcmk__cluster_is_node_active(node)) { /* Did the DC leave us? */ crm_notice("Our peer on the DC (%s) is dead", controld_globals.dc_name); @@ -274,7 +273,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d } if (down) { - const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK); + const char *task = crm_element_value(down->xml, PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { @@ -322,8 +321,8 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d crm_update_peer_join(__func__, node, crm_join_none); check_join_state(controld_globals.fsa_state, __func__); } - abort_transition(INFINITY, pcmk__graph_restart, "Node failure", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Node failure", NULL); fail_incompletable_actions(controld_globals.transition_graph, node->uuid); @@ -340,7 +339,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d /* Trigger resource placement on newly integrated nodes */ if (appeared) { - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Pacemaker Remote node integrated", NULL); } } @@ -349,7 +348,8 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d && (node->when_member > 1)) { /* The node left CPG but is still a cluster member. Set its * membership time to 1 to record it in the cluster state as a - * boolean, so we don't fence it due to node-pending-timeout. + * boolean, so we don't fence it due to + * PCMK_OPT_NODE_PENDING_TIMEOUT. */ node->when_member = 1; flags |= node_update_cluster; @@ -361,7 +361,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d if (update == NULL) { crm_debug("Node state update not yet possible for %s", node->uname); } else { - fsa_cib_anon_update(XML_CIB_TAG_STATUS, update); + fsa_cib_anon_update(PCMK_XE_STATUS, update); } free_xml(update); } diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c index 865e41f..7acff30 100644 --- a/daemons/controld/controld_cib.c +++ b/daemons/controld/controld_cib.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
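
node_alive(), shown above, collapses membership state into a tri-state: 1 for fully up, -1 for down, and 0 for partially up (cluster member or messaging peer, but not both), with Pacemaker Remote nodes never partial. A self-contained model using simplified booleans in place of the real peer-cache flags:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the controller's peer-cache bookkeeping */
    struct node {
        bool remote;  // Pacemaker Remote node?
        bool member;  // counted in cluster membership
        bool peer;    // reachable via the messaging layer (CPG)
    };

    static int
    node_alive(const struct node *node)
    {
        if (node->remote) {
            return node->member? 1 : -1;  // remote nodes are never partial
        }
        if (node->member && node->peer) {
            return 1;                     // completely up
        }
        if (!node->member && !node->peer) {
            return -1;                    // completely down
        }
        return 0;                         // partially up
    }

    int
    main(void)
    {
        struct node partial = { .remote = false, .member = true, .peer = false };

        printf("%d\n", node_alive(&partial));  // prints 0
        return 0;
    }
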
* @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -59,8 +58,8 @@ do_cib_updated(const char *event, xmlNode * msg) return; } - if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS) - || cib__element_in_patchset(patchset, XML_CIB_TAG_CRMCONFIG)) { + if (cib__element_in_patchset(patchset, PCMK_XE_ALERTS) + || cib__element_in_patchset(patchset, PCMK_XE_CRM_CONFIG)) { controld_trigger_config(); } @@ -70,21 +69,21 @@ do_cib_updated(const char *event, xmlNode * msg) return; } - client_name = crm_element_value(msg, F_CIB_CLIENTNAME); + client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME); if (!cib__client_triggers_refresh(client_name)) { // The CIB is still accurate return; } - if (cib__element_in_patchset(patchset, XML_CIB_TAG_NODES) - || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) { + if (cib__element_in_patchset(patchset, PCMK_XE_NODES) + || cib__element_in_patchset(patchset, PCMK_XE_STATUS)) { - /* An unsafe client modified the nodes or status section. Ensure the - * node list is up-to-date, and start the join process again so we get - * everyone's current resource history. + /* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS + * section. Ensure the node list is up-to-date, and start the join + * process again so we get everyone's current resource history. */ if (client_name == NULL) { - client_name = crm_element_value(msg, F_CIB_CLIENTID); + client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTID); } crm_notice("Populating nodes and starting an election after %s event " "triggered by %s", @@ -106,7 +105,7 @@ controld_disconnect_cib_manager(void) controld_clear_fsa_input_flags(R_CIB_CONNECTED); - cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY, + cib_conn->cmds->del_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY, do_cib_updated); cib_free_callbacks(cib_conn); @@ -175,7 +174,7 @@ do_cib_control(long long action, crm_err("Could not set dnotify callback"); } else if (cib_conn->cmds->add_notify_callback(cib_conn, - T_CIB_DIFF_NOTIFY, + PCMK__VALUE_CIB_DIFF_NOTIFY, update_cb) != pcmk_ok) { crm_err("Could not set CIB notification callback (update)"); @@ -226,12 +225,9 @@ cib_op_timeout(void) env_timeout, (env? 
env : "none")); } - calculated_timeout = 1 + crm_active_peers(); - if (crm_remote_peer_cache) { - calculated_timeout += g_hash_table_size(crm_remote_peer_cache); - } - calculated_timeout *= 10; - + calculated_timeout = 10U * (1U + + pcmk__cluster_num_active_nodes() + + pcmk__cluster_num_remote_nodes()); calculated_timeout = QB_MAX(calculated_timeout, env_timeout); crm_trace("Calculated timeout: %us", calculated_timeout); @@ -275,32 +271,32 @@ cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, } } -// Searches for various portions of node_state to delete +// Searches for various portions of PCMK__XE_NODE_STATE to delete -// Match a particular node's node_state (takes node name 1x) -#define XPATH_NODE_STATE "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" +// Match a particular node's PCMK__XE_NODE_STATE (takes node name 1x) +#define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" // Node's lrm section (name 1x) -#define XPATH_NODE_LRM XPATH_NODE_STATE "/" XML_CIB_TAG_LRM +#define XPATH_NODE_LRM XPATH_NODE_STATE "/" PCMK__XE_LRM -/* Node's lrm_rsc_op entries and lrm_resource entries without unexpired lock - * (name 2x, (seconds_since_epoch - XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT) 1x) +/* Node's PCMK__XE_LRM_RSC_OP entries and PCMK__XE_LRM_RESOURCE entries without + * unexpired lock + * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x) */ -#define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" XML_LRM_TAG_RSC_OP \ +#define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" PCMK__XE_LRM_RSC_OP \ "|" XPATH_NODE_STATE \ - "//" XML_LRM_TAG_RESOURCE \ - "[not(@" XML_CONFIG_ATTR_SHUTDOWN_LOCK ") " \ - "or " XML_CONFIG_ATTR_SHUTDOWN_LOCK "<%lld]" + "//" PCMK__XE_LRM_RESOURCE \ + "[not(@" PCMK_OPT_SHUTDOWN_LOCK ") " \ + "or " PCMK_OPT_SHUTDOWN_LOCK "<%lld]" -// Node's transient_attributes section (name 1x) -#define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" XML_TAG_TRANSIENT_NODEATTRS +// Node's PCMK__XE_TRANSIENT_ATTRIBUTES section (name 1x) +#define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" PCMK__XE_TRANSIENT_ATTRIBUTES -// Everything under node_state (name 1x) +// Everything under PCMK__XE_NODE_STATE (name 1x) #define XPATH_NODE_ALL XPATH_NODE_STATE "/*" /* Unlocked history + transient attributes - * (name 2x, (seconds_since_epoch - XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT) 1x, - * name 1x) + * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x, name 1x) */ #define XPATH_NODE_ALL_UNLOCKED XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS @@ -309,7 +305,7 @@ cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, * \brief Get the XPath and description of a node state section to be deleted * * \param[in] uname Desired node - * \param[in] section Subsection of node_state to be deleted + * \param[in] section Subsection of \c PCMK__XE_NODE_STATE to be deleted * \param[out] xpath Where to store XPath of \p section * \param[out] desc If not \c NULL, where to store description of \p section */ @@ -360,10 +356,10 @@ controld_node_state_deletion_strings(const char *uname, /*! 
* \internal - * \brief Delete subsection of a node's CIB node_state + * \brief Delete subsection of a node's CIB \c PCMK__XE_NODE_STATE * * \param[in] uname Desired node - * \param[in] section Subsection of node_state to delete + * \param[in] section Subsection of \c PCMK__XE_NODE_STATE to delete * \param[in] options CIB call options to use */ void @@ -391,12 +387,12 @@ controld_delete_node_state(const char *uname, enum controld_section_e section, } // Takes node name and resource ID -#define XPATH_RESOURCE_HISTORY "//" XML_CIB_TAG_STATE \ - "[@" XML_ATTR_UNAME "='%s']/" \ - XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ - "/" XML_LRM_TAG_RESOURCE \ - "[@" XML_ATTR_ID "='%s']" -// @TODO could add "and @XML_CONFIG_ATTR_SHUTDOWN_LOCK" to limit to locks +#define XPATH_RESOURCE_HISTORY "//" PCMK__XE_NODE_STATE \ + "[@" PCMK_XA_UNAME "='%s']/" \ + PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \ + "/" PCMK__XE_LRM_RESOURCE \ + "[@" PCMK_XA_ID "='%s']" +// @TODO could add "and @PCMK_OPT_SHUTDOWN_LOCK" to limit to locks /*! * \internal @@ -490,7 +486,7 @@ build_parameter_list(const lrmd_event_data_t *op, { GString *list = NULL; - *result = create_xml_node(NULL, XML_TAG_PARAMS); + *result = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS); /* Consider all parameters only except private ones to be consistent with * what scheduler does with calculate_secure_digest(). @@ -547,7 +543,7 @@ build_parameter_list(const lrmd_event_data_t *op, } else { crm_trace("Removing attr %s from the xml result", param->rap_name); - xml_remove_prop(*result, param->rap_name); + pcmk__xe_remove_attr(*result, param->rap_name); } } @@ -574,7 +570,9 @@ append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, } if (pcmk_is_set(metadata->ra_flags, ra_supports_reload_agent)) { - // Add parameters not marked reloadable to the "op-force-restart" list + /* Add parameters not marked reloadable to the PCMK__XA_OP_FORCE_RESTART + * list + */ list = build_parameter_list(op, metadata, ra_param_reloadable, &restart); @@ -583,7 +581,7 @@ append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, * * Before OCF 1.1, Pacemaker abused "unique=0" to indicate * reloadability. Add any parameters with unique="1" to the - * "op-force-restart" list. + * PCMK__XA_OP_FORCE_RESTART list. */ list = build_parameter_list(op, metadata, ra_param_unique, &restart); @@ -593,11 +591,13 @@ append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, } digest = calculate_operation_digest(restart, version); - /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, - * no matter if it actually supports any parameters with unique="1"). */ - crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, + /* Add PCMK__XA_OP_FORCE_RESTART and PCMK__XA_OP_RESTART_DIGEST to indicate + * the resource supports reload, no matter if it actually supports any + * reloadable parameters + */ + crm_xml_add(update, PCMK__XA_OP_FORCE_RESTART, (list == NULL)? 
"" : (const char *) list->str); - crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); + crm_xml_add(update, PCMK__XA_OP_RESTART_DIGEST, digest); if ((list != NULL) && (list->len > 0)) { crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str); @@ -622,17 +622,16 @@ append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, CRM_LOG_ASSERT(op->params != NULL); - /* - * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the - * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on - * the insecure ones + /* To keep PCMK__XA_OP_SECURE_PARAMS short, we want it to contain the secure + * parameters but PCMK__XA_OP_SECURE_DIGEST to be based on the insecure ones */ list = build_parameter_list(op, metadata, ra_param_private, &secure); if (list != NULL) { digest = calculate_operation_digest(secure, version); - crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, (const char *) list->str); - crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest); + crm_xml_add(update, PCMK__XA_OP_SECURE_PARAMS, + (const char *) list->str); + crm_xml_add(update, PCMK__XA_OP_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str); g_string_free(list, TRUE); @@ -672,7 +671,7 @@ controld_add_resource_history_xml_as(const char *func, xmlNode *parent, target_rc = rsc_op_expected_rc(op); - caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); + caller_version = g_hash_table_lookup(op->params, PCMK_XA_CRM_FEATURE_SET); CRM_CHECK(caller_version != NULL, caller_version = CRM_FEATURE_SET); xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc, @@ -742,8 +741,8 @@ controld_record_pending_op(const char *node_name, const lrmd_rsc_info_t *rsc, return false; } - // Check action's record-pending meta-attribute (defaults to true) - record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); + // Check action's PCMK_META_RECORD_PENDING meta-attribute (defaults to true) + record_pending = crm_meta_value(op->params, PCMK_META_RECORD_PENDING); if ((record_pending != NULL) && !crm_is_true(record_pending)) { return false; } @@ -890,34 +889,34 @@ controld_update_resource_history(const char *node_name, } // - update = create_xml_node(NULL, XML_CIB_TAG_STATUS); + update = pcmk__xe_create(NULL, PCMK_XE_STATUS); // - xml = create_xml_node(update, XML_CIB_TAG_STATE); + xml = pcmk__xe_create(update, PCMK__XE_NODE_STATE); if (pcmk__str_eq(node_name, controld_globals.our_nodename, pcmk__str_casei)) { node_id = controld_globals.our_uuid; } else { node_id = node_name; - pcmk__xe_set_bool_attr(xml, XML_NODE_IS_REMOTE, true); + pcmk__xe_set_bool_attr(xml, PCMK_XA_REMOTE_NODE, true); } - crm_xml_add(xml, XML_ATTR_ID, node_id); - crm_xml_add(xml, XML_ATTR_UNAME, node_name); - crm_xml_add(xml, XML_ATTR_ORIGIN, __func__); + crm_xml_add(xml, PCMK_XA_ID, node_id); + crm_xml_add(xml, PCMK_XA_UNAME, node_name); + crm_xml_add(xml, PCMK_XA_CRM_DEBUG_ORIGIN, __func__); // - xml = create_xml_node(xml, XML_CIB_TAG_LRM); - crm_xml_add(xml, XML_ATTR_ID, node_id); + xml = pcmk__xe_create(xml, PCMK__XE_LRM); + crm_xml_add(xml, PCMK_XA_ID, node_id); // - xml = create_xml_node(xml, XML_LRM_TAG_RESOURCES); + xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCES); // - xml = create_xml_node(xml, XML_LRM_TAG_RESOURCE); - crm_xml_add(xml, XML_ATTR_ID, op->rsc_id); - crm_xml_add(xml, XML_AGENT_ATTR_CLASS, rsc->standard); - crm_xml_add(xml, XML_AGENT_ATTR_PROVIDER, rsc->provider); - crm_xml_add(xml, XML_ATTR_TYPE, rsc->type); + xml = pcmk__xe_create(xml, 
PCMK__XE_LRM_RESOURCE); + crm_xml_add(xml, PCMK_XA_ID, op->rsc_id); + crm_xml_add(xml, PCMK_XA_CLASS, rsc->standard); + crm_xml_add(xml, PCMK_XA_PROVIDER, rsc->provider); + crm_xml_add(xml, PCMK_XA_TYPE, rsc->type); if (lock_time != 0) { /* Actions on a locked resource should either preserve the lock by * recording it with the action result, or clear it. @@ -925,16 +924,15 @@ controld_update_resource_history(const char *node_name, if (!should_preserve_lock(op)) { lock_time = 0; } - crm_xml_add_ll(xml, XML_CONFIG_ATTR_SHUTDOWN_LOCK, - (long long) lock_time); + crm_xml_add_ll(xml, PCMK_OPT_SHUTDOWN_LOCK, (long long) lock_time); } if (op->params != NULL) { container = g_hash_table_lookup(op->params, - CRM_META "_" XML_RSC_ATTR_CONTAINER); + CRM_META "_" PCMK__META_CONTAINER); if (container != NULL) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); - crm_xml_add(xml, XML_RSC_ATTR_CONTAINER, container); + crm_xml_add(xml, PCMK__META_CONTAINER, container); } } @@ -946,7 +944,7 @@ controld_update_resource_history(const char *node_name, * fenced for running a resource it isn't. */ crm_log_xml_trace(update, __func__); - controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, cib_rsc_callback); + controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback); free_xml(update); } @@ -963,15 +961,15 @@ controld_delete_action_history(const lrmd_event_data_t *op) CRM_CHECK(op != NULL, return); - xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); - crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); - crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); + xml_top = pcmk__xe_create(NULL, PCMK__XE_LRM_RSC_OP); + crm_xml_add_int(xml_top, PCMK__XA_CALL_ID, op->call_id); + crm_xml_add(xml_top, PCMK__XA_TRANSITION_KEY, op->user_data); if (op->interval_ms > 0) { char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ - crm_xml_add(xml_top, XML_ATTR_ID, op_id); + crm_xml_add(xml_top, PCMK_XA_ID, op_id); free(op_id); } @@ -979,31 +977,29 @@ controld_delete_action_history(const lrmd_event_data_t *op) op->rsc_id, op->op_type, op->interval_ms, op->call_id); controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, - XML_CIB_TAG_STATUS, xml_top, - cib_none); + PCMK_XE_STATUS, xml_top, cib_none); crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ - "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ - "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ - "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ - "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \ - "/" XML_LRM_TAG_RSC_OP + "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \ + "/" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \ + "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \ + "/" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" \ + "/" PCMK__XE_LRM_RSC_OP /* ... and also by operation key */ -#define XPATH_HISTORY_ID XPATH_HISTORY \ - "[@" XML_ATTR_ID "='%s']" +#define XPATH_HISTORY_ID XPATH_HISTORY "[@" PCMK_XA_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ - "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']" + "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_CALL_ID "='%d']" /* ... 
and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ - "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']" + "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_OPERATION_KEY "='%s']" /*! * \internal diff --git a/daemons/controld/controld_cib.h b/daemons/controld/controld_cib.h index dcc5a48..9a8d6ac 100644 --- a/daemons/controld/controld_cib.h +++ b/daemons/controld/controld_cib.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -16,7 +16,7 @@ #include #include -#include // PCMK__CIB_REQUEST_MODIFY +#include // cib__* #include "controld_globals.h" // controld_globals.cib_conn static inline void @@ -48,7 +48,7 @@ int controld_update_cib(const char *section, xmlNode *data, int options, void *)); unsigned int cib_op_timeout(void); -// Subsections of node_state +// Subsections of PCMK__XE_NODE_STATE enum controld_section_e { controld_section_lrm, controld_section_lrm_unlocked, diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c index 644d686..368659b 100644 --- a/daemons/controld/controld_control.c +++ b/daemons/controld/controld_control.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include #include @@ -27,10 +27,10 @@ static qb_ipcs_service_t *ipcs = NULL; static crm_trigger_t *config_read_trigger = NULL; #if SUPPORT_COROSYNC -extern gboolean crm_connect_corosync(crm_cluster_t * cluster); +extern gboolean crm_connect_corosync(pcmk_cluster_t *cluster); #endif -void crm_shutdown(int nsig); +static void crm_shutdown(int nsig); static gboolean crm_read_options(gpointer user_data); /* A_HA_CONNECT */ @@ -41,25 +41,25 @@ do_ha_control(long long action, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean registered = FALSE; - static crm_cluster_t *cluster = NULL; + static pcmk_cluster_t *cluster = NULL; if (cluster == NULL) { cluster = pcmk_cluster_new(); } if (action & A_HA_DISCONNECT) { - crm_cluster_disconnect(cluster); + pcmk_cluster_disconnect(cluster); crm_info("Disconnected from the cluster"); controld_set_fsa_input_flags(R_HA_DISCONNECTED); } if (action & A_HA_CONNECT) { - crm_set_status_callback(&peer_update_callback); - crm_set_autoreap(FALSE); + pcmk__cluster_set_status_callback(&peer_update_callback); + pcmk__cluster_set_autoreap(false); #if SUPPORT_COROSYNC - if (is_corosync_cluster()) { + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { registered = crm_connect_corosync(cluster); } #endif // SUPPORT_COROSYNC @@ -117,7 +117,7 @@ do_shutdown_req(long long action, pcmk__s(controld_globals.dc_name, "not set")); msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); - if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) { + if (!pcmk__cluster_send_message(NULL, crm_msg_crmd, msg)) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } free_xml(msg); @@ -241,7 +241,7 @@ crmd_exit(crm_exit_t exit_code) controld_destroy_transition_trigger(); pcmk__client_cleanup(); - crm_peer_destroy(); + pcmk__cluster_destroy_node_caches(); controld_free_fsa_timers(); te_cleanup_stonith_history_sync(NULL, TRUE); @@ -365,7 +365,7 @@ 
accept_controller_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("Accepting new IPC client connection"); if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -381,15 +381,17 @@ dispatch_controller_ipc(qb_ipcs_connection_t * c, void *data, size_t size) xmlNode *msg = pcmk__client_data2xml(client, data, &id, &flags); if (msg == NULL) { - pcmk__ipc_send_ack(client, id, flags, "ack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, + CRM_EX_PROTOCOL); return 0; } - pcmk__ipc_send_ack(client, id, flags, "ack", NULL, CRM_EX_INDETERMINATE); + pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, + CRM_EX_INDETERMINATE); CRM_ASSERT(client->user != NULL); - pcmk__update_acl_user(msg, F_CRM_USER, client->user); + pcmk__update_acl_user(msg, PCMK__XA_CRM_USER, client->user); - crm_xml_add(msg, F_CRM_SYS_FROM, client->id); + crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, client->id); if (controld_authorize_ipc_message(msg, client, NULL)) { crm_trace("Processing IPC message from client %s", pcmk__client_name(client)); @@ -515,194 +517,6 @@ do_recover(long long action, register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } -static pcmk__cluster_option_t controller_options[] = { - /* name, old name, type, allowed values, - * default value, validator, - * short description, - * long description - */ - { - "dc-version", NULL, "string", NULL, PCMK__VALUE_NONE, NULL, - N_("Pacemaker version on cluster node elected Designated Controller (DC)"), - N_("Includes a hash which identifies the exact changeset the code was " - "built from. Used for diagnostic purposes.") - }, - { - "cluster-infrastructure", NULL, "string", NULL, "corosync", NULL, - N_("The messaging stack on which Pacemaker is currently running"), - N_("Used for informational and diagnostic purposes.") - }, - { - "cluster-name", NULL, "string", NULL, NULL, NULL, - N_("An arbitrary name for the cluster"), - N_("This optional value is mostly for users' convenience as desired " - "in administration, but may also be used in Pacemaker " - "configuration rules via the #cluster-name node attribute, and " - "by higher-level tools and resource agents.") - }, - { - XML_CONFIG_ATTR_DC_DEADTIME, NULL, "time", - NULL, "20s", pcmk__valid_interval_spec, - N_("How long to wait for a response from other nodes during start-up"), - N_("The optimal value will depend on the speed and load of your network " - "and the type of switches used.") - }, - { - XML_CONFIG_ATTR_RECHECK, NULL, "time", - N_("Zero disables polling, while positive values are an interval in seconds" - "(unless other units are specified, for example \"5min\")"), - "15min", pcmk__valid_interval_spec, - N_("Polling interval to recheck cluster state and evaluate rules " - "with date specifications"), - N_("Pacemaker is primarily event-driven, and looks ahead to know when to " - "recheck cluster state for failure timeouts and most time-based " - "rules. 
However, it will also recheck the cluster after this " - "amount of inactivity, to evaluate rules with date specifications " - "and serve as a fail-safe for certain types of scheduler bugs.") - }, - { - "load-threshold", NULL, "percentage", NULL, - "80%", pcmk__valid_percentage, - N_("Maximum amount of system load that should be used by cluster nodes"), - N_("The cluster will slow down its recovery process when the amount of " - "system resources used (currently CPU) approaches this limit"), - }, - { - "node-action-limit", NULL, "integer", NULL, - "0", pcmk__valid_number, - N_("Maximum number of jobs that can be scheduled per node " - "(defaults to 2x cores)") - }, - { XML_CONFIG_ATTR_FENCE_REACTION, NULL, "string", NULL, "stop", NULL, - N_("How a cluster node should react if notified of its own fencing"), - N_("A cluster node may receive notification of its own fencing if fencing " - "is misconfigured, or if fabric fencing is in use that doesn't cut " - "cluster communication. Allowed values are \"stop\" to attempt to " - "immediately stop Pacemaker and stay stopped, or \"panic\" to attempt " - "to immediately reboot the local node, falling back to stop on failure.") - }, - { - XML_CONFIG_ATTR_ELECTION_FAIL, NULL, "time", NULL, - "2min", pcmk__valid_interval_spec, - "*** Advanced Use Only ***", - N_("Declare an election failed if it is not decided within this much " - "time. If you need to adjust this value, it probably indicates " - "the presence of a bug.") - }, - { - XML_CONFIG_ATTR_FORCE_QUIT, NULL, "time", NULL, - "20min", pcmk__valid_interval_spec, - "*** Advanced Use Only ***", - N_("Exit immediately if shutdown does not complete within this much " - "time. If you need to adjust this value, it probably indicates " - "the presence of a bug.") - }, - { - "join-integration-timeout", "crmd-integration-timeout", "time", NULL, - "3min", pcmk__valid_interval_spec, - "*** Advanced Use Only ***", - N_("If you need to adjust this value, it probably indicates " - "the presence of a bug.") - }, - { - "join-finalization-timeout", "crmd-finalization-timeout", "time", NULL, - "30min", pcmk__valid_interval_spec, - "*** Advanced Use Only ***", - N_("If you need to adjust this value, it probably indicates " - "the presence of a bug.") - }, - { - "transition-delay", "crmd-transition-delay", "time", NULL, - "0s", pcmk__valid_interval_spec, - N_("*** Advanced Use Only *** Enabling this option will slow down " - "cluster recovery under all conditions"), - N_("Delay cluster recovery for this much time to allow for additional " - "events to occur. Useful if your configuration is sensitive to " - "the order in which ping updates arrive.") - }, - { - "stonith-watchdog-timeout", NULL, "time", NULL, - "0", controld_verify_stonith_watchdog_timeout, - N_("How long before nodes can be assumed to be safely down when " - "watchdog-based self-fencing via SBD is in use"), - N_("If this is set to a positive value, lost nodes are assumed to " - "self-fence using watchdog-based SBD within this much time. This " - "does not require a fencing resource to be explicitly configured, " - "though a fence_watchdog resource can be configured, to limit use " - "to specific nodes. If this is set to 0 (the default), the cluster " - "will never assume watchdog-based self-fencing. If this is set to a " - "negative value, the cluster will use twice the local value of the " - "`SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, " - "or otherwise treat this as 0. 
WARNING: When used, this timeout " - "must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use " - "watchdog-based SBD, and Pacemaker will refuse to start on any of " - "those nodes where this is not true for the local value or SBD is " - "not active. When this is set to a negative value, " - "`SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes " - "that use SBD, otherwise data corruption or loss could occur.") - }, - { - "stonith-max-attempts", NULL, "integer", NULL, - "10", pcmk__valid_positive_number, - N_("How many times fencing can fail before it will no longer be " - "immediately re-attempted on a target") - }, - - // Already documented in libpe_status (other values must be kept identical) - { - "no-quorum-policy", NULL, "select", - "stop, freeze, ignore, demote, suicide", "stop", pcmk__valid_quorum, - N_("What to do when the cluster does not have quorum"), NULL - }, - { - XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL, - "false", pcmk__valid_boolean, - N_("Whether to lock resources to a cleanly shut down node"), - N_("When true, resources active on a node when it is cleanly shut down " - "are kept \"locked\" to that node (not allowed to run elsewhere) " - "until they start again on that node after it rejoins (or for at " - "most shutdown-lock-limit, if set). Stonith resources and " - "Pacemaker Remote connections are never locked. Clone and bundle " - "instances and the promoted role of promotable clones are " - "currently never locked, though support could be added in a future " - "release.") - }, - { - XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL, - "0", pcmk__valid_interval_spec, - N_("Do not lock resources to a cleanly shut down node longer than " - "this"), - N_("If shutdown-lock is true and this is set to a nonzero time " - "duration, shutdown locks will expire after this much time has " - "passed since the shutdown was initiated, even if the node has not " - "rejoined.") - }, - { - XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL, - "0", pcmk__valid_interval_spec, - N_("How long to wait for a node that has joined the cluster to join " - "the controller process group"), - N_("Fence nodes that do not join the controller process group within " - "this much time after joining the cluster, to allow the cluster " - "to continue managing resources. A value of 0 means never fence " - "pending nodes. 
Setting the value to 2h means fence nodes after " - "2 hours.") - }, -}; - -void -crmd_metadata(void) -{ - const char *desc_short = "Pacemaker controller options"; - const char *desc_long = "Cluster options used by Pacemaker's controller"; - - gchar *s = pcmk__format_option_metadata("pacemaker-controld", desc_short, - desc_long, controller_options, - PCMK__NELEM(controller_options)); - printf("%s", s); - g_free(s); -} - static void config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { @@ -726,49 +540,62 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void } crmconfig = output; - if ((crmconfig != NULL) - && !pcmk__xe_is(crmconfig, XML_CIB_TAG_CRMCONFIG)) { - crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG); + if ((crmconfig != NULL) && !pcmk__xe_is(crmconfig, PCMK_XE_CRM_CONFIG)) { + crmconfig = pcmk__xe_first_child(crmconfig, PCMK_XE_CRM_CONFIG, NULL, + NULL); } if (!crmconfig) { fsa_data_t *msg_data = NULL; - crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed"); + crm_err("Local CIB query for " PCMK_XE_CRM_CONFIG " section failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); goto bail; } crm_debug("Call %d : Parsing CIB options", call_id); config_hash = pcmk__strkey_table(free, free); - pe_unpack_nvpairs(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL, - config_hash, CIB_OPTIONS_FIRST, FALSE, now, NULL); + pe_unpack_nvpairs(crmconfig, crmconfig, PCMK_XE_CLUSTER_PROPERTY_SET, NULL, + config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, FALSE, now, + NULL); // Validate all options, and use defaults if not already present in hash - pcmk__validate_cluster_options(config_hash, controller_options, - PCMK__NELEM(controller_options)); + pcmk__validate_cluster_options(config_hash); - value = g_hash_table_lookup(config_hash, "no-quorum-policy"); - if (pcmk__str_eq(value, "suicide", pcmk__str_casei) && pcmk__locate_sbd()) { + /* Validate the watchdog timeout in the context of the local node + * environment. If invalid, the controller will exit with a fatal error. + * + * We do this via a wrapper in the controller, so that we call + * pcmk__valid_stonith_watchdog_timeout() only if watchdog fencing is + * enabled for the local node. Otherwise, we may exit unnecessarily. + * + * A validator function in libcrmcommon can't act as such a wrapper, because + * it doesn't have a stonith API connection or the local node name. 
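A minimal sketch of the pattern the hunk above adopts (internal API, so this builds only inside the Pacemaker tree): the option table moves out of the controller and into libcrmcommon, so a daemon now validates a freshly unpacked name/value table with a single call instead of passing its own option array and element count. The helpers used here are the same ones this patch uses elsewhere.

    // Hypothetical validation flow using pcmk__strkey_table() and
    // pcmk__insert_dup() as seen in other hunks of this patch
    GHashTable *config_hash = pcmk__strkey_table(free, free);

    pcmk__insert_dup(config_hash, PCMK_OPT_SHUTDOWN_LOCK, "true");

    // Validates every known option and inserts defaults for any option
    // not already present in the table
    pcmk__validate_cluster_options(config_hash);

    g_hash_table_destroy(config_hash);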
+ */ + value = g_hash_table_lookup(config_hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT); + controld_verify_stonith_watchdog_timeout(value); + + value = g_hash_table_lookup(config_hash, PCMK_OPT_NO_QUORUM_POLICY); + if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei) + && (pcmk__locate_sbd() != 0)) { controld_set_global_flags(controld_no_quorum_suicide); } - value = g_hash_table_lookup(config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK); + value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK); if (crm_is_true(value)) { controld_set_global_flags(controld_shutdown_lock_enabled); } else { controld_clear_global_flags(controld_shutdown_lock_enabled); } - value = g_hash_table_lookup(config_hash, - XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT); - controld_globals.shutdown_lock_limit = crm_parse_interval_spec(value) - / 1000; + value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); + pcmk_parse_interval_spec(value, &controld_globals.shutdown_lock_limit); + controld_globals.shutdown_lock_limit /= 1000; - value = g_hash_table_lookup(config_hash, - XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT); - controld_globals.node_pending_timeout = crm_parse_interval_spec(value) / 1000; + value = g_hash_table_lookup(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); + pcmk_parse_interval_spec(value, &controld_globals.node_pending_timeout); + controld_globals.node_pending_timeout /= 1000; - value = g_hash_table_lookup(config_hash, "cluster-name"); + value = g_hash_table_lookup(config_hash, PCMK_OPT_CLUSTER_NAME); pcmk__str_update(&(controld_globals.cluster_name), value); // Let subcomponents initialize their own static variables @@ -777,7 +604,7 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void controld_configure_fsa_timers(config_hash); controld_configure_throttle(config_hash); - alerts = first_named_child(output, XML_CIB_TAG_ALERTS); + alerts = pcmk__xe_first_child(output, PCMK_XE_ALERTS, NULL, NULL); crmd_unpack_alerts(alerts); controld_set_fsa_input_flags(R_READ_CONFIG); @@ -809,8 +636,8 @@ crm_read_options(gpointer user_data) { cib_t *cib_conn = controld_globals.cib_conn; int call_id = cib_conn->cmds->query(cib_conn, - "//" XML_CIB_TAG_CRMCONFIG - " | //" XML_CIB_TAG_ALERTS, + "//" PCMK_XE_CRM_CONFIG + " | //" PCMK_XE_ALERTS, NULL, cib_xpath|cib_scope_local); fsa_register_cib_callback(call_id, NULL, config_query_callback); @@ -829,7 +656,7 @@ do_read_config(long long action, controld_trigger_config(); } -void +static void crm_shutdown(int nsig) { const char *value = NULL; @@ -856,9 +683,7 @@ crm_shutdown(int nsig) * config_query_callback() has been run at least once, it doesn't look like * anything could have changed the timer period since then. */ - value = pcmk__cluster_option(NULL, controller_options, - PCMK__NELEM(controller_options), - XML_CONFIG_ATTR_FORCE_QUIT); - default_period_ms = crm_parse_interval_spec(value); + value = pcmk__cluster_option(NULL, PCMK_OPT_SHUTDOWN_ESCALATION); + pcmk_parse_interval_spec(value, &default_period_ms); controld_shutdown_start_countdown(default_period_ms); } diff --git a/daemons/controld/controld_corosync.c b/daemons/controld/controld_corosync.c index b69e821..d0652e4 100644 --- a/daemons/controld/controld_corosync.c +++ b/daemons/controld/controld_corosync.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
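A minimal sketch of the interval-parsing convention the hunks above migrate to: the removed crm_parse_interval_spec() returned the parsed value directly, while pcmk_parse_interval_spec() reports success via a standard return code and writes the result, in milliseconds, through an output parameter (the "5min" literal below is illustrative):

    guint interval_ms = 0U;

    if (pcmk_parse_interval_spec("5min", &interval_ms) == pcmk_rc_ok) {
        // interval_ms is now 300000; callers above divide by 1000
        // where they need whole seconds (e.g. shutdown_lock_limit)
    }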
* @@ -31,14 +31,14 @@ crmd_cs_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, { uint32_t kind = 0; const char *from = NULL; - char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); + char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { crm_node_t *peer = NULL; - xmlNode *xml = string2xml(data); + xmlNode *xml = pcmk__xml_parse(data); if (xml == NULL) { crm_err("Could not parse message content (%d): %.100s", kind, data); @@ -46,10 +46,9 @@ crmd_cs_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, return; } - crm_xml_add(xml, F_ORIG, from); - /* crm_xml_add_int(xml, F_SEQ, wrapper->id); Fake? */ + crm_xml_add(xml, PCMK__XA_SRC, from); - peer = crm_get_peer(0, from); + peer = pcmk__get_node(0, from, NULL, pcmk__node_search_cluster_member); if (!pcmk_is_set(peer->processes, crm_proc_cpg)) { /* If we can still talk to our peer process on that node, * then it must be part of the corosync membership @@ -57,7 +56,7 @@ crmd_cs_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, crm_warn("Receiving messages from a node we think is dead: %s[%d]", peer->uname, peer->id); crm_update_peer_proc(__func__, peer, crm_proc_cpg, - ONLINESTATUS); + PCMK_VALUE_ONLINE); } crmd_ha_msg_filter(xml); free_xml(xml); @@ -119,8 +118,8 @@ cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name, if (controld_globals.dc_name != NULL) { crm_node_t *peer = NULL; - peer = pcmk__search_cluster_node_cache(0, controld_globals.dc_name, - NULL); + peer = pcmk__search_node_caches(0, controld_globals.dc_name, + pcmk__node_search_cluster_member); if (peer != NULL) { for (int i = 0; i < left_list_entries; ++i) { if (left_list[i].nodeid == peer->id) { @@ -132,25 +131,26 @@ cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name, } // Process the change normally, which will call the peer callback as needed - pcmk_cpg_membership(handle, cpg_name, member_list, member_list_entries, - left_list, left_list_entries, - joined_list, joined_list_entries); + pcmk__cpg_confchg_cb(handle, cpg_name, member_list, member_list_entries, + left_list, left_list_entries, + joined_list, joined_list_entries); controld_clear_global_flags(controld_dc_left); } -extern gboolean crm_connect_corosync(crm_cluster_t * cluster); +extern gboolean crm_connect_corosync(pcmk_cluster_t *cluster); gboolean -crm_connect_corosync(crm_cluster_t * cluster) +crm_connect_corosync(pcmk_cluster_t *cluster) { - if (is_corosync_cluster()) { - crm_set_status_callback(&peer_update_callback); - cluster->cpg.cpg_deliver_fn = crmd_cs_dispatch; - cluster->cpg.cpg_confchg_fn = cpg_membership_callback; - cluster->destroy = crmd_cs_destroy; + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { + pcmk__cluster_set_status_callback(&peer_update_callback); - if (crm_cluster_connect(cluster)) { + pcmk_cluster_set_destroy_fn(cluster, crmd_cs_destroy); + pcmk_cpg_set_deliver_fn(cluster, crmd_cs_dispatch); + pcmk_cpg_set_confchg_fn(cluster, cpg_membership_callback); + + if (pcmk_cluster_connect(cluster) == pcmk_rc_ok) { pcmk__corosync_quorum_connect(crmd_quorum_callback, crmd_cs_destroy); return TRUE; diff --git a/daemons/controld/controld_election.c b/daemons/controld/controld_election.c index 70ffecc..1d4ff25 100644 --- a/daemons/controld/controld_election.c +++ b/daemons/controld/controld_election.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 
2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -44,10 +43,11 @@ controld_election_init(const char *uname) void controld_configure_election(GHashTable *options) { - const char *value = NULL; + const char *value = g_hash_table_lookup(options, PCMK_OPT_ELECTION_TIMEOUT); + guint interval_ms = 0U; - value = g_hash_table_lookup(options, XML_CONFIG_ATTR_ELECTION_FAIL); - election_timeout_set_period(fsa_election, crm_parse_interval_spec(value)); + pcmk_parse_interval_spec(value, &interval_ms); + election_timeout_set_period(fsa_election, interval_ms); } void @@ -201,7 +201,7 @@ feature_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, vo #define dc_takeover_update_attr(name, value) do { \ cib__update_node_attr(controld_globals.logger_out, \ controld_globals.cib_conn, cib_none, \ - XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, \ + PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, \ name, value, NULL, NULL); \ } while (0) @@ -213,7 +213,8 @@ do_dc_takeover(long long action, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *cib = NULL; - const char *cluster_type = name_for_cluster_type(get_cluster_type()); + const enum pcmk_cluster_layer cluster_layer = pcmk_get_cluster_layer(); + const char *cluster_layer_s = pcmk_cluster_layer_text(cluster_layer); pid_t watchdog = pcmk__locate_sbd(); crm_info("Taking over DC status for this partition"); @@ -226,20 +227,23 @@ do_dc_takeover(long long action, controld_globals.cib_conn->cmds->set_primary(controld_globals.cib_conn, cib_scope_local); - cib = create_xml_node(NULL, XML_TAG_CIB); - crm_xml_add(cib, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - controld_update_cib(XML_TAG_CIB, cib, cib_none, feature_update_callback); + cib = pcmk__xe_create(NULL, PCMK_XE_CIB); + crm_xml_add(cib, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); + controld_update_cib(PCMK_XE_CIB, cib, cib_none, feature_update_callback); - dc_takeover_update_attr(XML_ATTR_HAVE_WATCHDOG, pcmk__btoa(watchdog)); - dc_takeover_update_attr("dc-version", PACEMAKER_VERSION "-" BUILD_VERSION); - dc_takeover_update_attr("cluster-infrastructure", cluster_type); + dc_takeover_update_attr(PCMK_OPT_HAVE_WATCHDOG, pcmk__btoa(watchdog)); + dc_takeover_update_attr(PCMK_OPT_DC_VERSION, + PACEMAKER_VERSION "-" BUILD_VERSION); + dc_takeover_update_attr(PCMK_OPT_CLUSTER_INFRASTRUCTURE, cluster_layer_s); #if SUPPORT_COROSYNC - if ((controld_globals.cluster_name == NULL) && is_corosync_cluster()) { + if ((controld_globals.cluster_name == NULL) + && (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync)) { + char *cluster_name = pcmk__corosync_cluster_name(); if (cluster_name != NULL) { - dc_takeover_update_attr("cluster-name", cluster_name); + dc_takeover_update_attr(PCMK_OPT_CLUSTER_NAME, cluster_name); } free(cluster_name); } @@ -265,13 +269,15 @@ do_dc_release(long long action, crm_info("DC role released"); if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { xmlNode *update = NULL; - crm_node_t *node = crm_get_peer(0, controld_globals.our_nodename); + crm_node_t *node = + pcmk__get_node(0, controld_globals.our_nodename, + NULL, pcmk__node_search_cluster_member); pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN); update = create_node_state_update(node, node_update_expected, NULL, __func__); /* Don't need a based response because controld will stop. 
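A minimal sketch of the XML idiom do_dc_takeover() now follows, assuming (consistently with this patch) that pcmk__xe_create() asserts on allocation failure, so the NULL check that create_xml_node() required is gone, and that the PCMK_XE_*/PCMK_XA_* constants carry the element and attribute names formerly spelled XML_*:

    // Create a standalone <cib> element and stamp it with the local
    // feature set, as the DC-takeover hunk above does
    xmlNode *cib = pcmk__xe_create(NULL, PCMK_XE_CIB);

    crm_xml_add(cib, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
    // ... hand the node to controld_update_cib() or similar ...
    free_xml(cib);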
*/ - fsa_cib_anon_update_discard_reply(XML_CIB_TAG_STATUS, update); + fsa_cib_anon_update_discard_reply(PCMK_XE_STATUS, update); free_xml(update); } register_fsa_input(C_FSA_INTERNAL, I_RELEASE_SUCCESS, NULL); @@ -280,6 +286,5 @@ do_dc_release(long long action, crm_err("Unknown DC action %s", fsa_action2string(action)); } - crm_trace("Am I still the DC? %s", AM_I_DC ? XML_BOOLEAN_YES : XML_BOOLEAN_NO); - + crm_trace("Am I still the DC? %s", pcmk__btoa(AM_I_DC)); } diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 480d37d..917c8c0 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -17,7 +17,6 @@ #include #include // lrmd_event_data_t, lrmd_rsc_info_t, etc. #include -#include #include #include #include @@ -68,7 +67,7 @@ static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { - g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); + pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } @@ -76,7 +75,7 @@ static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { - g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); + pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } @@ -175,14 +174,14 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { - entry = calloc(1, sizeof(rsc_history_t)); - entry->id = strdup(op->rsc_id); + entry = pcmk__assert_alloc(1, sizeof(rsc_history_t)); + entry->id = pcmk__str_copy(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; - entry->rsc.type = strdup(rsc->type); - entry->rsc.standard = strdup(rsc->standard); - pcmk__str_update(&entry->rsc.provider, rsc->provider); + entry->rsc.type = pcmk__str_copy(rsc->type); + entry->rsc.standard = pcmk__str_copy(rsc->standard); + entry->rsc.provider = pcmk__str_copy(rsc->provider); } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); @@ -539,18 +538,21 @@ build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; - xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); + xmlNode *xml_rsc = pcmk__xe_create(rsc_list, PCMK__XE_LRM_RESOURCE); - crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); - crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); - crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard); - crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); + crm_xml_add(xml_rsc, PCMK_XA_ID, entry->id); + crm_xml_add(xml_rsc, PCMK_XA_TYPE, entry->rsc.type); + crm_xml_add(xml_rsc, PCMK_XA_CLASS, entry->rsc.standard); + crm_xml_add(xml_rsc, PCMK_XA_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { - const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); + static const char *name = CRM_META "_" PCMK__META_CONTAINER; + const char *container = g_hash_table_lookup(entry->last->params, + name); + if 
(container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); - crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); + crm_xml_add(xml_rsc, PCMK__META_CONTAINER, container); } } controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->failed, @@ -581,7 +583,7 @@ controld_query_executor_state(void) return NULL; } - peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY); + peer = pcmk__get_node(0, lrm_state->node_name, NULL, pcmk__node_search_any); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, @@ -591,9 +593,9 @@ controld_query_executor_state(void) return NULL; } - xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); - crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid); - rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES); + xml_data = pcmk__xe_create(xml_state, PCMK__XE_LRM); + crm_xml_add(xml_data, PCMK_XA_ID, peer->uuid); + rsc_list = pcmk__xe_create(xml_data, PCMK__XE_LRM_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); @@ -651,7 +653,7 @@ controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id) crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_none, - XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, + PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, NULL, NULL); free(now_s); } @@ -661,8 +663,8 @@ static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; - const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); - const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); + const char *from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM); + const char *from_host = crm_element_value(input->msg, PCMK__XA_SRC); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? 
from_host : "localhost"), rsc_id, @@ -711,7 +713,7 @@ delete_rsc_entry(lrm_state_t *lrm_state, ha_msg_input_t *input, CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { - char *rsc_id_copy = strdup(rsc_id); + char *rsc_id_copy = pcmk__str_copy(rsc_id); if (rsc_iter) { g_hash_table_iter_remove(rsc_iter); @@ -907,7 +909,7 @@ static int get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { - const char *id = ID(rsc_xml); + const char *id = pcmk__xe_id(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); @@ -921,7 +923,7 @@ get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml, // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { - const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG); + const char *long_id = crm_element_value(rsc_xml, PCMK__XA_LONG_ID); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); @@ -929,9 +931,9 @@ get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml, } if ((*rsc_info == NULL) && do_create) { - const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS); - const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER); - const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE); + const char *class = crm_element_value(rsc_xml, PCMK_XA_CLASS); + const char *provider = crm_element_value(rsc_xml, PCMK_XA_PROVIDER); + const char *type = crm_element_value(rsc_xml, PCMK_XA_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); @@ -979,10 +981,10 @@ delete_resource(lrm_state_t *lrm_state, const char *id, lrmd_rsc_info_t *rsc, crm_info("Deletion of resource '%s' from executor is pending", id); if (request) { struct pending_deletion_op_s *op = NULL; - char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); + char *ref = crm_element_value_copy(request->msg, PCMK_XA_REFERENCE); - op = calloc(1, sizeof(struct pending_deletion_op_s)); - op->rsc = strdup(rsc->id); + op = pcmk__assert_alloc(1, sizeof(struct pending_deletion_op_s)); + op->rsc = pcmk__str_copy(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } @@ -1096,25 +1098,26 @@ synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action, const char *exit_reason) { lrmd_event_data_t *op = NULL; - const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK); - const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET); - xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE); + const char *operation = crm_element_value(action, PCMK_XA_OPERATION); + const char *target_node = crm_element_value(action, PCMK__META_ON_NODE); + xmlNode *xml_rsc = pcmk__xe_first_child(action, PCMK_XE_PRIMITIVE, NULL, + NULL); - if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) { + if ((xml_rsc == NULL) || (pcmk__xe_id(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", - crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, + crm_element_value(action, PCMK__XA_OPERATION_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", - ID(xml_rsc), rc, target_node); + pcmk__xe_id(xml_rsc), rc, target_node); return; } - op = construct_op(lrm_state, action, ID(xml_rsc), operation); + op = construct_op(lrm_state, action, pcmk__xe_id(xml_rsc), operation); if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { // Notifications can't fail @@ -1146,7 +1149,7 @@ lrm_op_target(const xmlNode *xml) const char *target = NULL; if (xml) { - target = crm_element_value(xml, XML_LRM_ATTR_TARGET); + target = crm_element_value(xml, PCMK__META_ON_NODE); } if (target == NULL) { target = controld_globals.our_nodename; @@ -1160,7 +1163,7 @@ fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; - xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE); + xmlNode *xml_rsc = pcmk__xe_first_child(xml, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(xml_rsc != NULL, return); @@ -1172,18 +1175,20 @@ fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. */ - op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon"); + op = construct_op(lrm_state, xml, pcmk__xe_id(xml_rsc), "asyncmon"); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; if (user_name && !pcmk__is_privileged(user_name)) { - crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); + crm_err("%s does not have permission to fail %s", + user_name, pcmk__xe_id(xml_rsc)); fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_INSUFFICIENT_PRIV, "Unprivileged user cannot fail resources"); - controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); + controld_ack_event_directly(from_host, from_sys, NULL, op, + pcmk__xe_id(xml_rsc)); lrmd_free_event(op); return; } @@ -1204,7 +1209,8 @@ fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, "Cannot fail unknown resource"); } - controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); + controld_ack_event_directly(from_host, from_sys, NULL, op, + pcmk__xe_id(xml_rsc)); lrmd_free_event(op); } @@ -1242,16 +1248,17 @@ static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, const char *op_task = NULL; guint interval_ms = 0; gboolean in_progress = FALSE; - xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); + xmlNode *params = pcmk__xe_first_child(input->xml, PCMK__XE_ATTRIBUTES, + NULL, NULL); CRM_CHECK(params != NULL, return FALSE); - meta_key = crm_meta_name(XML_LRM_ATTR_TASK); + meta_key = crm_meta_name(PCMK_XA_OPERATION); op_task = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); - meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); + meta_key = crm_meta_name(PCMK_META_INTERVAL); if (crm_element_value_ms(params, meta_key, &interval_ms) != pcmk_ok) { free(meta_key); return FALSE; @@ -1260,7 +1267,7 @@ static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, op_key = pcmk__op_key(rsc->id, op_task, interval_ms); - meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); + 
meta_key = crm_meta_name(PCMK__XA_CALL_ID); call_id = crm_element_value(params, meta_key); free(meta_key); @@ -1302,7 +1309,8 @@ static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, * not abcdaa8, they will time out waiting for the ack (no * released versions of Pacemaker are affected). */ - const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); + const char *peer_version = crm_element_value(params, + PCMK_XA_CRM_FEATURE_SET); if (compare_version(peer_version, "3.0.8") <= 0) { crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", @@ -1359,9 +1367,8 @@ new_metadata_cb_data(lrmd_rsc_info_t *rsc, xmlNode *input_xml) { struct metadata_cb_data *data = NULL; - data = calloc(1, sizeof(struct metadata_cb_data)); - CRM_ASSERT(data != NULL); - data->input_xml = copy_xml(input_xml); + data = pcmk__assert_alloc(1, sizeof(struct metadata_cb_data)); + data->input_xml = pcmk__xml_copy(NULL, input_xml); data->rsc = lrmd_copy_rsc_info(rsc); return data; } @@ -1433,11 +1440,11 @@ do_lrm_invoke(long long action, } CRM_ASSERT(lrm_state != NULL); - user_name = pcmk__update_acl_user(input->msg, F_CRM_USER, NULL); - crm_op = crm_element_value(input->msg, F_CRM_TASK); - from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); + user_name = pcmk__update_acl_user(input->msg, PCMK__XA_CRM_USER, NULL); + crm_op = crm_element_value(input->msg, PCMK__XA_CRM_TASK); + from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM); if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { - from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); + from_host = crm_element_value(input->msg, PCMK__XA_SRC); } if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { @@ -1447,7 +1454,7 @@ do_lrm_invoke(long long action, operation = PCMK_ACTION_DELETE; } else if (input->xml != NULL) { - operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); + operation = crm_element_value(input->xml, PCMK_XA_OPERATION); } CRM_CHECK(!pcmk__str_empty(crm_op) || !pcmk__str_empty(operation), return); @@ -1471,7 +1478,7 @@ do_lrm_invoke(long long action, // @COMPAT DCs <1.1.14 in a rolling upgrade might schedule this op } else if (pcmk__str_eq(operation, CRM_OP_PROBED, pcmk__str_none)) { - update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, + update_attrd(lrm_state->node_name, CRM_OP_PROBED, PCMK_VALUE_TRUE, user_name, is_remote_node); } else if (pcmk__str_eq(crm_op, CRM_OP_REPROBE, pcmk__str_none) @@ -1480,20 +1487,21 @@ do_lrm_invoke(long long action, if (input->xml != NULL) { // For CRM_OP_REPROBE, a NULL target means we're targeting all nodes - raw_target = crm_element_value(input->xml, XML_LRM_ATTR_TARGET); + raw_target = crm_element_value(input->xml, PCMK__META_ON_NODE); } handle_reprobe_op(lrm_state, from_sys, from_host, user_name, is_remote_node, (raw_target == NULL)); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; - xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); + xmlNode *xml_rsc = pcmk__xe_first_child(input->xml, PCMK_XE_PRIMITIVE, + NULL, NULL); gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none); int rc; // We can't return anything meaningful without a resource ID - CRM_CHECK(xml_rsc && ID(xml_rsc), return); + CRM_CHECK((xml_rsc != NULL) && (pcmk__xe_id(xml_rsc) != NULL), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { @@ -1509,15 +1517,15 @@ do_lrm_invoke(long long action, */ crm_notice("Not registering 
resource '%s' for a %s event " CRM_XS " get-rc=%d (%s) transition-key=%s", - ID(xml_rsc), operation, - rc, pcmk_strerror(rc), ID(input->xml)); - delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, - user_name, true); + pcmk__xe_id(xml_rsc), operation, + rc, pcmk_strerror(rc), pcmk__xe_id(input->xml)); + delete_rsc_entry(lrm_state, input, pcmk__xe_id(xml_rsc), NULL, + pcmk_ok, user_name, true); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource - crm_err("Invalid resource definition for %s", ID(xml_rsc)); + crm_err("Invalid resource definition for %s", pcmk__xe_id(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_NOT_CONFIGURED, // fatal error @@ -1528,7 +1536,7 @@ do_lrm_invoke(long long action, // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " CRM_XS " rc=%d", - ID(xml_rsc), pcmk_strerror(rc), rc); + pcmk__xe_id(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_INVALID_PARAM, // hard error @@ -1632,30 +1640,30 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, */ op->params = pcmk__strkey_table(free, free); - g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); + pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); - g_hash_table_remove(params, CRM_META "_op_target_rc"); + g_hash_table_remove(params, CRM_META "_" PCMK__META_OP_TARGET_RC); - op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); + op_delay = crm_meta_value(params, PCMK_META_START_DELAY); pcmk__scan_min_int(op_delay, &op->start_delay, 0); - op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); + op_timeout = crm_meta_value(params, PCMK_META_TIMEOUT); pcmk__scan_min_int(op_timeout, &op->timeout, 0); - if (pcmk__guint_from_hash(params, CRM_META "_" XML_LRM_ATTR_INTERVAL_MS, 0, + if (pcmk__guint_from_hash(params, CRM_META "_" PCMK_META_INTERVAL, 0, &(op->interval_ms)) != pcmk_rc_ok) { op->interval_ms = 0; } /* Use pcmk_monitor_timeout instead of meta timeout for stonith recurring monitor, if set */ - primitive = find_xml_node(rsc_op, XML_CIB_TAG_RESOURCE, FALSE); - class = crm_element_value(primitive, XML_AGENT_ATTR_CLASS); + primitive = pcmk__xe_first_child(rsc_op, PCMK_XE_PRIMITIVE, NULL, NULL); + class = crm_element_value(primitive, PCMK_XA_CLASS); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params) && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei) @@ -1663,7 +1671,9 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (op_timeout != NULL) { - op->timeout = crm_get_msec(op_timeout); + long long timeout_ms = crm_get_msec(op_timeout); + + op->timeout = (int) QB_MIN(timeout_ms, INT_MAX); } } @@ -1701,10 +1711,10 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, op->start_delay = 0; } - transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); + transition = crm_element_value(rsc_op, PCMK__XA_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); - op->user_data = strdup(transition); + op->user_data = pcmk__str_copy(transition); if (op->interval_ms != 0) { if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP, @@ 
-1745,23 +1755,25 @@ controld_ack_event_directly(const char *to_host, const char *to_sys, CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { + // op->rsc_id is a (const char *) but lrmd_free_event() frees it CRM_ASSERT(rsc_id != NULL); - op->rsc_id = strdup(rsc_id); + op->rsc_id = pcmk__str_copy(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } - peer = crm_get_peer(0, controld_globals.our_nodename); + peer = pcmk__get_node(0, controld_globals.our_nodename, NULL, + pcmk__node_search_cluster_member); update = create_node_state_update(peer, node_update_none, NULL, __func__); - iter = create_xml_node(update, XML_CIB_TAG_LRM); - crm_xml_add(iter, XML_ATTR_ID, controld_globals.our_uuid); - iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); - iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); + iter = pcmk__xe_create(update, PCMK__XE_LRM); + crm_xml_add(iter, PCMK_XA_ID, controld_globals.our_uuid); + iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCES); + iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCE); - crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); + crm_xml_add(iter, PCMK_XA_ID, op->rsc_id); controld_add_resource_history_xml(iter, rsc, op, controld_globals.our_nodename); @@ -1771,7 +1783,7 @@ controld_ack_event_directly(const char *to_host, const char *to_sys, crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, - crm_element_value(reply, XML_ATTR_REFERENCE)); + crm_element_value(reply, PCMK_XA_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); @@ -1916,10 +1928,10 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, CRM_CHECK((rsc != NULL) && (msg != NULL), return); - operation = crm_element_value(msg, XML_LRM_ATTR_TASK); + operation = crm_element_value(msg, PCMK_XA_OPERATION); CRM_CHECK(!pcmk__str_empty(operation), return); - transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); + transition = crm_element_value(msg, PCMK__XA_TRANSITION_KEY); if (pcmk__str_empty(transition)) { crm_log_xml_err(msg, "Missing transition number"); } @@ -1982,8 +1994,8 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, crm_notice("Discarding attempt to perform action %s on %s in state %s " "(shutdown=%s)", operation, rsc->id, fsa_state2string(controld_globals.fsa_state), - pcmk__btoa(pcmk_is_set(controld_globals.fsa_input_register, - R_SHUTDOWN))); + pcmk__flag_text(controld_globals.fsa_input_register, + R_SHUTDOWN)); lrmd__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_INVALID, nack_reason); @@ -2013,17 +2025,17 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, char *call_id_s = make_stop_id(rsc->id, call_id); active_op_t *pending = NULL; - pending = calloc(1, sizeof(active_op_t)); + pending = pcmk__assert_alloc(1, sizeof(active_op_t)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; - pending->op_type = strdup(operation); - pending->op_key = strdup(op_id); - pending->rsc_id = strdup(rsc->id); + pending->op_type = pcmk__str_copy(operation); + pending->op_key = pcmk__str_copy(op_id); + pending->rsc_id = pcmk__str_copy(rsc->id); pending->start_time = time(NULL); - pcmk__str_update(&pending->user_data, op->user_data); - if (crm_element_value_epoch(msg, XML_CONFIG_ATTR_SHUTDOWN_LOCK, + pending->user_data = pcmk__str_copy(op->user_data); + if (crm_element_value_epoch(msg, PCMK_OPT_SHUTDOWN_LOCK, 
&(pending->lock_time)) != pcmk_ok) { pending->lock_time = 0; } @@ -2082,7 +2094,7 @@ unescape_newlines(const char *string) return NULL; } - ret = strdup(string); + ret = pcmk__str_copy(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* Replace newline escape pattern with actual newline (and a space so we @@ -2249,11 +2261,12 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if ((rsc == NULL) && action_xml) { - xmlNode *xml = find_xml_node(action_xml, XML_CIB_TAG_RESOURCE, TRUE); + xmlNode *xml = pcmk__xe_first_child(action_xml, PCMK_XE_PRIMITIVE, NULL, + NULL); - const char *standard = crm_element_value(xml, XML_AGENT_ATTR_CLASS); - const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER); - const char *type = crm_element_value(xml, XML_ATTR_TYPE); + const char *standard = crm_element_value(xml, PCMK_XA_CLASS); + const char *provider = crm_element_value(xml, PCMK_XA_PROVIDER); + const char *type = crm_element_value(xml, PCMK_XA_TYPE); if (standard && type) { crm_info("%s agent information not cached, using %s%s%s:%s from action XML", @@ -2270,7 +2283,7 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, if (lrm_state) { node_name = lrm_state->node_name; } else if (action_xml) { - node_name = crm_element_value(action_xml, XML_LRM_ATTR_TARGET); + node_name = crm_element_value(action_xml, PCMK__META_ON_NODE); } if(pending == NULL) { diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c index b90cc5e..1919684 100644 --- a/daemons/controld/controld_execd_state.c +++ b/daemons/controld/controld_execd_state.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -12,8 +12,8 @@ #include #include -#include #include +#include #include #include #include @@ -116,12 +116,9 @@ lrm_state_create(const char *node_name) return NULL; } - state = calloc(1, sizeof(lrm_state_t)); - if (!state) { - return NULL; - } + state = pcmk__assert_alloc(1, sizeof(lrm_state_t)); - state->node_name = strdup(node_name); + state->node_name = pcmk__str_copy(node_name); state->rsc_info_cache = pcmk__strkey_table(NULL, free_rsc_info); state->deletion_ops = pcmk__strkey_table(free, free_deletion_op); state->active_ops = pcmk__strkey_table(free, free_recurring_op); @@ -453,7 +450,7 @@ crmd_proxy_dispatch(const char *session, xmlNode *msg) { crm_trace("Processing proxied IPC message from session %s", session); crm_log_xml_trace(msg, "controller[inbound]"); - crm_xml_add(msg, F_CRM_SYS_FROM, session); + crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, session); if (controld_authorize_ipc_message(msg, NULL, session)) { route_message(C_IPC_MESSAGE, msg); } @@ -477,8 +474,9 @@ remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void * crm_debug("Call %d : Parsing CIB options", call_id); - pe_unpack_nvpairs(output, output, XML_CIB_TAG_PROPSET, NULL, - config_hash, CIB_OPTIONS_FIRST, FALSE, now, NULL); + pe_unpack_nvpairs(output, output, PCMK_XE_CLUSTER_PROPERTY_SET, NULL, + config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, FALSE, + now, NULL); /* Now send it to the remote peer */ lrmd__validate_remote_settings(lrmd, config_hash); @@ -492,20 +490,22 @@ static void crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { lrm_state_t *lrm_state = userdata; - const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); + const char *session = crm_element_value(msg, PCMK__XA_LRMD_IPC_SESSION); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); - const char *op = crm_element_value(msg, F_LRMD_IPC_OP); + const char *op = crm_element_value(msg, PCMK__XA_LRMD_IPC_OP); if (pcmk__str_eq(op, LRMD_IPC_OP_NEW, pcmk__str_casei)) { - const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); + const char *channel = crm_element_value(msg, PCMK__XA_LRMD_IPC_SERVER); proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel); if (!remote_ra_controlling_guest(lrm_state)) { if (proxy != NULL) { cib_t *cib_conn = controld_globals.cib_conn; - /* Look up stonith-watchdog-timeout and send to the remote peer for validation */ - int rc = cib_conn->cmds->query(cib_conn, XML_CIB_TAG_CRMCONFIG, + /* Look up PCMK_OPT_STONITH_WATCHDOG_TIMEOUT and send to the + * remote peer for validation + */ + int rc = cib_conn->cmds->query(cib_conn, PCMK_XE_CRM_CONFIG, NULL, cib_scope_local); cib_conn->cmds->register_callback_full(cib_conn, rc, 10, FALSE, lrmd, @@ -525,7 +525,8 @@ crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) if (!remote_ra_is_in_maintenance(lrm_state)) { now_s = pcmk__ttoa(time(NULL)); - update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, TRUE); + update_attrd(lrm_state->node_name, PCMK__NODE_ATTR_SHUTDOWN, now_s, + NULL, TRUE); free(now_s); remote_proxy_ack_shutdown(lrmd); @@ -545,39 +546,43 @@ crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) * to send to ourselves over IPC -- do it directly. 
*/ int flags = 0; - xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG, + NULL, NULL); + xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_CHECK(request != NULL, return); CRM_CHECK(lrm_state->node_name, return); - crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); - pcmk__update_acl_user(request, F_LRMD_IPC_USER, lrm_state->node_name); + crm_xml_add(request, PCMK_XE_ACL_ROLE, "pacemaker-remote"); + pcmk__update_acl_user(request, PCMK__XA_LRMD_IPC_USER, + lrm_state->node_name); /* Pacemaker Remote nodes don't know their own names (as known to the * cluster). When getting a node info request with no name or ID, add * the name, so we don't return info for ourselves instead of the * Pacemaker Remote node. */ - if (pcmk__str_eq(crm_element_value(request, F_CRM_TASK), CRM_OP_NODE_INFO, pcmk__str_casei)) { + if (pcmk__str_eq(crm_element_value(request, PCMK__XA_CRM_TASK), + CRM_OP_NODE_INFO, pcmk__str_none)) { int node_id = 0; - crm_element_value_int(request, XML_ATTR_ID, &node_id); + crm_element_value_int(request, PCMK_XA_ID, &node_id); if ((node_id <= 0) - && (crm_element_value(request, XML_ATTR_UNAME) == NULL)) { - crm_xml_add(request, XML_ATTR_UNAME, lrm_state->node_name); + && (crm_element_value(request, PCMK_XA_UNAME) == NULL)) { + crm_xml_add(request, PCMK_XA_UNAME, lrm_state->node_name); } } crmd_proxy_dispatch(session, request); - crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); + crm_element_value_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, &flags); if (flags & crm_ipc_client_response) { int msg_id = 0; - xmlNode *op_reply = create_xml_node(NULL, "ack"); + xmlNode *op_reply = pcmk__xe_create(NULL, PCMK__XE_ACK); - crm_xml_add(op_reply, "function", __func__); - crm_xml_add_int(op_reply, "line", __LINE__); + crm_xml_add(op_reply, PCMK_XA_FUNCTION, __func__); + crm_xml_add_int(op_reply, PCMK__XA_LINE, __LINE__); - crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); + crm_element_value_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id); remote_proxy_relay_response(proxy, op_reply, msg_id); free_xml(op_reply); @@ -650,7 +655,7 @@ lrm_state_get_metadata(lrm_state_t * lrm_state, * @TODO Make meta-data calls asynchronous. (This will be part of a larger * project to make meta-data calls via the executor rather than directly.) */ - params = lrmd_key_value_add(params, CRM_META "_" XML_LRM_ATTR_TARGET, + params = lrmd_key_value_add(params, CRM_META "_" PCMK__META_ON_NODE, lrm_state->node_name); return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn, diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c index 9557d9e..dcffc8e 100644 --- a/daemons/controld/controld_fencing.c +++ b/daemons/controld/controld_fencing.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
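A minimal sketch of the message-unwrapping pattern introduced just above, assuming "msg" is the proxied IPC message inside a void handler: instead of get_message_xml(), the code locates the PCMK__XE_LRMD_IPC_MSG wrapper element and then takes its first child of any name as the payload:

    xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG,
                                            NULL, NULL);
    // NULL element name: first child regardless of tag
    xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);

    CRM_CHECK(request != NULL, return);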
* @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -63,9 +62,9 @@ set_fence_reaction(const char *reaction_s) fence_reaction_panic = true; } else { - if (!pcmk__str_eq(reaction_s, "stop", pcmk__str_casei)) { + if (!pcmk__str_eq(reaction_s, PCMK_VALUE_STOP, pcmk__str_casei)) { crm_warn("Invalid value '%s' for %s, using 'stop'", - reaction_s, XML_CONFIG_ATTR_FENCE_REACTION); + reaction_s, PCMK_OPT_FENCE_REACTION); } fence_reaction_panic = false; } @@ -82,10 +81,10 @@ controld_configure_fencing(GHashTable *options) { const char *value = NULL; - value = g_hash_table_lookup(options, XML_CONFIG_ATTR_FENCE_REACTION); + value = g_hash_table_lookup(options, PCMK_OPT_FENCE_REACTION); set_fence_reaction(value); - value = g_hash_table_lookup(options, "stonith-max-attempts"); + value = g_hash_table_lookup(options, PCMK_OPT_STONITH_MAX_ATTEMPTS); update_stonith_max_attempts(value); } @@ -176,7 +175,7 @@ st_fail_count_increment(const char *target) } rec->count = 1; - g_hash_table_insert(stonith_failures, strdup(target), rec); + g_hash_table_insert(stonith_failures, pcmk__str_copy(target), rec); } } @@ -191,8 +190,8 @@ cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); - abort_transition(INFINITY, pcmk__graph_shutdown, "CIB update failed", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_shutdown, + "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); @@ -222,7 +221,7 @@ send_stonith_update(pcmk__graph_action_t *action, const char *target, * Try getting any existing node cache entry also by node uuid in case it * doesn't have an uname yet. 
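A one-line illustration of the score-constant swap running through these hunks: the bare INFINITY macro, which could collide with the C99 floating-point INFINITY from math.h, gives way to the public PCMK_SCORE_INFINITY, with call sites otherwise unchanged:

    abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_shutdown,
                     "CIB update failed", NULL);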
*/ - peer = pcmk__get_peer_full(0, target, uuid, CRM_GET_PEER_ANY); + peer = pcmk__get_node(0, target, uuid, pcmk__node_search_any); CRM_CHECK(peer != NULL, return); @@ -236,7 +235,7 @@ send_stonith_update(pcmk__graph_action_t *action, const char *target, if (peer->uuid == NULL) { crm_info("Recording uuid '%s' for node '%s'", uuid, target); - peer->uuid = strdup(uuid); + peer->uuid = pcmk__str_copy(uuid); } crmd_peer_down(peer, TRUE); @@ -248,21 +247,21 @@ send_stonith_update(pcmk__graph_action_t *action, const char *target, if (peer->flags & crm_remote_node) { char *now_s = pcmk__ttoa(time(NULL)); - crm_xml_add(node_state, XML_NODE_IS_FENCED, now_s); + crm_xml_add(node_state, PCMK__XA_NODE_FENCED, now_s); free(now_s); } /* Force our known ID */ - crm_xml_add(node_state, XML_ATTR_ID, uuid); + crm_xml_add(node_state, PCMK_XA_ID, uuid); rc = controld_globals.cib_conn->cmds->modify(controld_globals.cib_conn, - XML_CIB_TAG_STATUS, node_state, + PCMK_XE_STATUS, node_state, cib_scope_local |cib_can_create); /* Delay processing the trigger until the update completes */ crm_debug("Sending fencing update %d for %s", rc, target); - fsa_register_cib_callback(rc, strdup(target), cib_fencing_updated); + fsa_register_cib_callback(rc, pcmk__str_copy(target), cib_fencing_updated); // Make sure it sticks /* controld_globals.cib_conn->cmds->bump_epoch(controld_globals.cib_conn, @@ -293,7 +292,8 @@ abort_for_stonith_failure(enum pcmk__graph_next abort_action, if ((abort_action != pcmk__graph_wait) && too_many_st_failures(target)) { abort_action = pcmk__graph_wait; } - abort_transition(INFINITY, abort_action, "Stonith failed", reason); + abort_transition(PCMK_SCORE_INFINITY, abort_action, "Stonith failed", + reason); } @@ -315,7 +315,8 @@ static GList *stonith_cleanup_list = NULL; */ void add_stonith_cleanup(const char *target) { - stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target)); + stonith_cleanup_list = g_list_append(stonith_cleanup_list, + pcmk__str_copy(target)); } /*! 
@@ -374,8 +375,9 @@ execute_stonith_cleanup(void) for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; - crm_node_t *target_node = crm_get_peer(0, target); - const char *uuid = crm_peer_uuid(target_node); + crm_node_t *target_node = + pcmk__get_node(0, target, NULL, pcmk__node_search_cluster_member); + const char *uuid = pcmk__cluster_node_uuid(target_node); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); @@ -424,13 +426,13 @@ fail_incompletable_stonith(pcmk__graph_t *graph) continue; } - task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); + task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); last_action = action->xml; pcmk__update_graph(graph, action); crm_notice("Failing action %d (%s): fencer terminated", - action->id, ID(action->xml)); + action->id, pcmk__xe_id(action->xml)); } } } @@ -580,15 +582,17 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event) event->id); if (succeeded) { - crm_node_t *peer = pcmk__search_known_node_cache(0, event->target, - CRM_GET_PEER_ANY); + const uint32_t flags = pcmk__node_search_any + |pcmk__node_search_cluster_cib; + + crm_node_t *peer = pcmk__search_node_caches(0, event->target, flags); const char *uuid = NULL; if (peer == NULL) { return; } - uuid = crm_peer_uuid(peer); + uuid = pcmk__cluster_node_uuid(peer); if (AM_I_DC) { /* The DC always sends updates */ @@ -598,8 +602,8 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event) * hosted any guest nodes, and call remote_node_down() for them. * Unfortunately, the controller doesn't have a simple, reliable way * to map hosts to guests. It might be possible to track this in the - * peer cache via crm_remote_peer_cache_refresh(). For now, we rely - * on the scheduler creating fence pseudo-events for the guests. + * peer cache via refresh_remote_nodes(). For now, we rely on the + * scheduler creating fence pseudo-events for the guests. 
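A minimal sketch of the unified peer-cache API these hunks migrate to: the assorted crm_get_peer*() and cache-specific search helpers collapse into pcmk__get_node() and pcmk__search_node_caches(), with pcmk__node_search_* flags selecting which caches to consult (the node name below is illustrative):

    // Cluster-member cache only, creating an entry if needed
    crm_node_t *member = pcmk__get_node(0, "node1", NULL,
                                        pcmk__node_search_cluster_member);

    // Search every cache, including nodes known only from the CIB
    crm_node_t *any = pcmk__search_node_caches(0, "node1",
                                               pcmk__node_search_any
                                               |pcmk__node_search_cluster_cib);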
*/ if (!pcmk__str_eq(client, te_client_id, pcmk__str_casei)) { @@ -608,7 +612,7 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event) */ crm_info("External fencing operation from %s fenced %s", client, event->target); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "External Fencing Operation", NULL); } @@ -713,15 +717,16 @@ controld_timer_fencer_connect(gpointer user_data) } if (rc == pcmk_ok) { - stonith_api->cmds->register_notification(stonith_api, - T_STONITH_NOTIFY_DISCONNECT, - tengine_stonith_connection_destroy); - stonith_api->cmds->register_notification(stonith_api, - T_STONITH_NOTIFY_FENCE, - handle_fence_notification); - stonith_api->cmds->register_notification(stonith_api, - T_STONITH_NOTIFY_HISTORY_SYNCED, - tengine_stonith_history_synced); + stonith_api_operations_t *cmds = stonith_api->cmds; + + cmds->register_notification(stonith_api, + PCMK__VALUE_ST_NOTIFY_DISCONNECT, + tengine_stonith_connection_destroy); + cmds->register_notification(stonith_api, PCMK__VALUE_ST_NOTIFY_FENCE, + handle_fence_notification); + cmds->register_notification(stonith_api, + PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, + tengine_stonith_history_synced); te_trigger_stonith_history_sync(TRUE); crm_notice("Fencer successfully connected"); } @@ -829,7 +834,7 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) goto bail; } - target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + target = crm_element_value(action->xml, PCMK__META_ON_NODE); if (target == NULL) { crm_err("Ignoring fence operation %d result: No target given (bug?)", data->call_id); @@ -838,8 +843,10 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) stop_te_timer(action); if (stonith__exit_status(data) == CRM_EX_OK) { - const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); - const char *op = crm_meta_value(action->params, "stonith_action"); + const char *uuid = crm_element_value(action->xml, + PCMK__META_ON_NODE_UUID); + const char *op = crm_meta_value(action->params, + PCMK__META_STONITH_ACTION); crm_info("Fence operation %d for %s succeeded", data->call_id, target); if (!(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) { @@ -864,11 +871,12 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) is_remote_node); free(now); - value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL); + value = crm_meta_value(action->params, PCMK__META_DIGESTS_ALL); update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, is_remote_node); - value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE); + value = crm_meta_value(action->params, + PCMK__META_DIGESTS_SECURE); update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, is_remote_node); @@ -952,10 +960,11 @@ controld_execute_fence_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { int rc = 0; - const char *id = ID(action->xml); - const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); - const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); - const char *type = crm_meta_value(action->params, "stonith_action"); + const char *id = pcmk__xe_id(action->xml); + const char *uuid = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); + const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); + const char *type = crm_meta_value(action->params, + PCMK__META_STONITH_ACTION); char *transition_key = NULL; const char *priority_delay = NULL; 
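/* The locals above and below are filled in from the graph action's XML and
 * metadata. Note the recurring conversion: metadata keys that were bare
 * string literals ("stonith_action") now use PCMK__META_* constants, and
 * crm_meta_value() is assumed to apply the usual CRM_META prefix when it
 * looks a key up in action->params, as the explicit CRM_META "_"
 * concatenations later in this patch suggest.
 */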
int delay_i = 0; @@ -973,7 +982,8 @@ controld_execute_fence_action(pcmk__graph_t *graph, return EPROTO; } - priority_delay = crm_meta_value(action->params, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY); + priority_delay = crm_meta_value(action->params, + PCMK_OPT_PRIORITY_FENCING_DELAY); crm_notice("Requesting fencing (%s) targeting node %s " CRM_XS " action=%s timeout=%i%s%s", @@ -1001,17 +1011,16 @@ controld_execute_fence_action(pcmk__graph_t *graph, bool controld_verify_stonith_watchdog_timeout(const char *value) { - long st_timeout = value? crm_get_msec(value) : 0; + long long st_timeout = (value != NULL)? crm_get_msec(value) : 0; const char *our_nodename = controld_globals.our_nodename; - gboolean rv = TRUE; if (st_timeout == 0 || (stonith_api && (stonith_api->state != stonith_disconnected) && stonith__watchdog_fencing_enabled_for_node_api(stonith_api, our_nodename))) { - rv = pcmk__valid_sbd_timeout(value); + return pcmk__valid_stonith_watchdog_timeout(value); } - return rv; + return true; } /* end stonith API client functions */ @@ -1042,7 +1051,7 @@ te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers) } if (st) { - st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED); + st->cmds->remove_notification(st, PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED); } } diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c index 06559b8..79b3507 100644 --- a/daemons/controld/controld_fsa.c +++ b/daemons/controld/controld_fsa.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -198,7 +197,7 @@ s_crmd_fsa(enum crmd_fsa_cause cause) if ((controld_globals.fsa_message_queue == NULL) && (controld_globals.fsa_actions != A_NOTHING)) { /* fake the first message so we can get into the loop */ - fsa_data = calloc(1, sizeof(fsa_data_t)); + fsa_data = pcmk__assert_alloc(1, sizeof(fsa_data_t)); fsa_data->fsa_input = I_NULL; fsa_data->fsa_cause = C_FSA_INTERNAL; fsa_data->origin = __func__; @@ -283,8 +282,8 @@ s_crmd_fsa(enum crmd_fsa_cause cause) crm_debug("Exiting the FSA: queue=%d, fsa_actions=%#llx, stalled=%s", g_list_length(controld_globals.fsa_message_queue), (unsigned long long) controld_globals.fsa_actions, - pcmk__btoa(pcmk_is_set(controld_globals.flags, - controld_fsa_is_stalled))); + pcmk__flag_text(controld_globals.flags, + controld_fsa_is_stalled)); } else { crm_trace("Exiting the FSA"); } @@ -549,7 +548,7 @@ check_join_counts(fsa_data_t *msg_data) return; } - npeers = crm_active_peers(); + npeers = pcmk__cluster_num_active_nodes(); count = crmd_join_phase_count(crm_join_confirmed); if (count == npeers) { if (npeers == 1) { diff --git a/daemons/controld/controld_fsa.h b/daemons/controld/controld_fsa.h index 2b79f07..ad1c4fa 100644 --- a/daemons/controld/controld_fsa.h +++ b/daemons/controld/controld_fsa.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -401,7 +401,9 @@ enum crmd_fsa_input { # define R_HAVE_CIB 0x00020000ULL /* Do we have an up-to-date CIB */ # define R_MEMBERSHIP 0x00100000ULL /* Have we got cluster layer data yet */ -# define R_PEER_DATA 0x00200000ULL /* Have we got T_CL_STATUS data yet */ + +// Ever received membership-layer data +# define R_PEER_DATA 0x00200000ULL # define R_HA_DISCONNECTED 0x00400000ULL /* did we sign out of our own accord */ diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c index 805ecbd..8faf58b 100644 --- a/daemons/controld/controld_join_client.c +++ b/daemons/controld/controld_join_client.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,7 +11,6 @@ #include #include -#include #include #include @@ -34,8 +33,9 @@ static void update_dc_expected(const xmlNode *msg) { if ((controld_globals.dc_name != NULL) - && pcmk__xe_attr_is_true(msg, F_CRM_DC_LEAVING)) { - crm_node_t *dc_node = crm_get_peer(0, controld_globals.dc_name); + && pcmk__xe_attr_is_true(msg, PCMK__XA_DC_LEAVING)) { + crm_node_t *dc_node = pcmk__get_node(0, controld_globals.dc_name, NULL, + pcmk__node_search_cluster_member); pcmk__update_peer_expected(__func__, dc_node, CRMD_JOINSTATE_DOWN); } @@ -55,7 +55,7 @@ do_cl_join_query(long long action, sleep(1); // Give the cluster layer time to propagate to the DC update_dc(NULL); /* Unset any existing value so that the result is not discarded */ crm_debug("Querying for a DC"); - send_cluster_message(NULL, crm_msg_crmd, req, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_crmd, req); free_xml(req); } @@ -84,7 +84,7 @@ do_cl_join_announce(long long action, crm_debug("Announcing availability"); update_dc(NULL); - send_cluster_message(NULL, crm_msg_crmd, req, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_crmd, req); free_xml(req); } else { @@ -112,10 +112,10 @@ do_cl_join_offer_respond(long long action, CRM_CHECK(input != NULL, return); - welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM); - join_id = crm_element_value(input->msg, F_CRM_JOIN_ID); + welcome_from = crm_element_value(input->msg, PCMK__XA_SRC); + join_id = crm_element_value(input->msg, PCMK__XA_JOIN_ID); crm_trace("Accepting cluster join offer from node %s "CRM_XS" join-%s", - welcome_from, crm_element_value(input->msg, F_CRM_JOIN_ID)); + welcome_from, crm_element_value(input->msg, PCMK__XA_JOIN_ID)); /* we only ever want the last one */ if (query_call_id > 0) { @@ -134,7 +134,7 @@ do_cl_join_offer_respond(long long action, query_call_id = cib_conn->cmds->query(cib_conn, NULL, NULL, cib_scope_local|cib_no_children); - fsa_register_cib_callback(query_call_id, strdup(join_id), + fsa_register_cib_callback(query_call_id, pcmk__str_copy(join_id), join_query_callback); crm_trace("Registered join query callback: %d", query_call_id); @@ -146,7 +146,7 @@ void join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *join_id = user_data; - xmlNode *generation = create_xml_node(NULL, XML_CIB_TAG_GENERATION_TUPPLE); + xmlNode *generation = pcmk__xe_create(NULL, PCMK__XE_GENERATION_TUPLE); CRM_LOG_ASSERT(join_id != NULL); @@ -166,19 +166,21 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * } else { xmlNode *reply = NULL; + const crm_node_t *dc_node = + pcmk__get_node(0, controld_globals.dc_name, NULL, + pcmk__node_search_cluster_member); 
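/* The dc_node resolved above feeds the pcmk__cluster_send_message() call
 * below. This patch consistently looks the peer up first with
 * pcmk__get_node() and then sends, where the old code nested crm_get_peer()
 * directly inside send_cluster_message(). The trailing boolean that
 * send_cluster_message() took is simply dropped; presumably the replacement
 * API no longer exposes that per-message flag, since no converted call site
 * in this patch passes anything in its place.
 */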
crm_debug("Respond to join offer join-%s from %s", join_id, controld_globals.dc_name); - copy_in_properties(generation, output); + pcmk__xe_copy_attrs(generation, output, pcmk__xaf_none); reply = create_request(CRM_OP_JOIN_REQUEST, generation, controld_globals.dc_name, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); - crm_xml_add(reply, F_CRM_JOIN_ID, join_id); - crm_xml_add(reply, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - send_cluster_message(crm_get_peer(0, controld_globals.dc_name), - crm_msg_crmd, reply, TRUE); + crm_xml_add(reply, PCMK__XA_JOIN_ID, join_id); + crm_xml_add(reply, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); + pcmk__cluster_send_message(dc_node, crm_msg_crmd, reply); free_xml(reply); } @@ -190,29 +192,32 @@ void set_join_state(const char *start_state, const char *node_name, const char *node_uuid, bool remote) { - if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) { + if (pcmk__str_eq(start_state, PCMK_VALUE_STANDBY, pcmk__str_casei)) { crm_notice("Forcing node %s to join in %s state per configured " "environment", node_name, start_state); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, node_uuid, - NULL, NULL, NULL, "standby", "on", NULL, - remote ? "remote" : NULL); + PCMK_XE_NODES, node_uuid, + NULL, NULL, NULL, PCMK_NODE_ATTR_STANDBY, + PCMK_VALUE_TRUE, NULL, + (remote? PCMK_VALUE_REMOTE : NULL)); - } else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) { + } else if (pcmk__str_eq(start_state, PCMK_VALUE_ONLINE, pcmk__str_casei)) { crm_notice("Forcing node %s to join in %s state per configured " "environment", node_name, start_state); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_sync_call, - XML_CIB_TAG_NODES, node_uuid, - NULL, NULL, NULL, "standby", "off", NULL, - remote ? "remote" : NULL); + PCMK_XE_NODES, node_uuid, + NULL, NULL, NULL, PCMK_NODE_ATTR_STANDBY, + PCMK_VALUE_FALSE, NULL, + (remote? 
PCMK_VALUE_REMOTE : NULL)); - } else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) { + } else if (pcmk__str_eq(start_state, PCMK_VALUE_DEFAULT, pcmk__str_casei)) { crm_debug("Not forcing a starting state on node %s", node_name); } else { - crm_warn("Unrecognized start state '%s', using 'default' (%s)", + crm_warn("Unrecognized start state '%s', using " + "'" PCMK_VALUE_DEFAULT "' (%s)", start_state, node_name); } } @@ -220,11 +225,11 @@ set_join_state(const char *start_state, const char *node_name, const char *node_ static int update_conn_host_cache(xmlNode *node, void *userdata) { - const char *remote = crm_element_value(node, XML_ATTR_ID); - const char *conn_host = crm_element_value(node, PCMK__XA_CONN_HOST); - const char *state = crm_element_value(node, XML_CIB_TAG_STATE); + const char *remote = crm_element_value(node, PCMK_XA_ID); + const char *conn_host = crm_element_value(node, PCMK__XA_CONNECTION_HOST); + const char *state = crm_element_value(node, PCMK__XA_NODE_STATE); - crm_node_t *remote_peer = crm_remote_peer_get(remote); + crm_node_t *remote_peer = pcmk__cluster_lookup_remote_node(remote); if (remote_peer == NULL) { return pcmk_rc_ok; @@ -256,8 +261,8 @@ do_cl_join_finalize_respond(long long action, const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE); int join_id = -1; - const char *op = crm_element_value(input->msg, F_CRM_TASK); - const char *welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM); + const char *op = crm_element_value(input->msg, PCMK__XA_CRM_TASK); + const char *welcome_from = crm_element_value(input->msg, PCMK__XA_SRC); if (!pcmk__str_eq(op, CRM_OP_JOIN_ACKNAK, pcmk__str_casei)) { crm_trace("Ignoring op=%s message", op); @@ -269,7 +274,7 @@ do_cl_join_finalize_respond(long long action, was_nack = FALSE; } - crm_element_value_int(input->msg, F_CRM_JOIN_ID, &join_id); + crm_element_value_int(input->msg, PCMK__XA_JOIN_ID, &join_id); if (was_nack) { crm_err("Shutting down because cluster join with leader %s failed " @@ -305,8 +310,11 @@ do_cl_join_finalize_respond(long long action, xmlNode *reply = create_request(CRM_OP_JOIN_CONFIRM, tmp1, controld_globals.dc_name, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); + const crm_node_t *dc_node = + pcmk__get_node(0, controld_globals.dc_name, NULL, + pcmk__node_search_cluster_member); - crm_xml_add_int(reply, F_CRM_JOIN_ID, join_id); + crm_xml_add_int(reply, PCMK__XA_JOIN_ID, join_id); crm_debug("Confirming join-%d: sending local operation history to %s", join_id, controld_globals.dc_name); @@ -333,8 +341,7 @@ do_cl_join_finalize_respond(long long action, } } - send_cluster_message(crm_get_peer(0, controld_globals.dc_name), - crm_msg_crmd, reply, TRUE); + pcmk__cluster_send_message(dc_node, crm_msg_crmd, reply); free_xml(reply); if (AM_I_DC == FALSE) { @@ -347,9 +354,10 @@ do_cl_join_finalize_respond(long long action, /* Update the remote node cache with information about which node * is hosting the connection. 
*/ - remotes = pcmk__xe_match(input->msg, XML_CIB_TAG_NODES, NULL, NULL); + remotes = pcmk__xe_first_child(input->msg, PCMK_XE_NODES, NULL, NULL); if (remotes != NULL) { - pcmk__xe_foreach_child(remotes, XML_CIB_TAG_NODE, update_conn_host_cache, NULL); + pcmk__xe_foreach_child(remotes, PCMK_XE_NODE, + update_conn_host_cache, NULL); } } else { diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c index 2fe6710..e943e65 100644 --- a/daemons/controld/controld_join_dc.c +++ b/daemons/controld/controld_join_dc.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -11,7 +11,6 @@ #include -#include #include #include @@ -188,12 +187,12 @@ create_dc_message(const char *join_op, const char *host_to) CRM_SYSTEM_DC, NULL); /* Identify which election this is a part of */ - crm_xml_add_int(msg, F_CRM_JOIN_ID, current_join_id); + crm_xml_add_int(msg, PCMK__XA_JOIN_ID, current_join_id); /* Add a field specifying whether the DC is shutting down. This keeps the * joining node from fencing the old DC if it becomes the new DC. */ - pcmk__xe_set_bool_attr(msg, F_CRM_DC_LEAVING, + pcmk__xe_set_bool_attr(msg, PCMK__XA_DC_LEAVING, pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)); return msg; @@ -206,7 +205,7 @@ join_make_offer(gpointer key, gpointer value, gpointer user_data) crm_node_t *member = (crm_node_t *)value; CRM_ASSERT(member != NULL); - if (crm_is_peer_active(member) == FALSE) { + if (!pcmk__cluster_is_node_active(member)) { crm_info("Not making join-%d offer to inactive node %s", current_join_id, (member->uname? member->uname : "with unknown name")); @@ -249,10 +248,10 @@ join_make_offer(gpointer key, gpointer value, gpointer user_data) offer = create_dc_message(CRM_OP_JOIN_OFFER, member->uname); // Advertise our feature set so the joining node can bail if not compatible - crm_xml_add(offer, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); + crm_xml_add(offer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_info("Sending join-%d offer to %s", current_join_id, member->uname); - send_cluster_message(member, crm_msg_crmd, offer, TRUE); + pcmk__cluster_send_message(member, crm_msg_crmd, offer); free_xml(offer); crm_update_peer_join(__func__, member, crm_join_welcomed); @@ -313,12 +312,12 @@ do_dc_join_offer_one(long long action, return; } - join_to = crm_element_value(welcome->msg, F_CRM_HOST_FROM); + join_to = crm_element_value(welcome->msg, PCMK__XA_SRC); if (join_to == NULL) { crm_err("Can't make join-%d offer to unknown node", current_join_id); return; } - member = crm_get_peer(0, join_to); + member = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster_member); /* It is possible that a node will have been sick or starting up when the * original offer was made. However, it will either re-announce itself in @@ -332,14 +331,16 @@ do_dc_join_offer_one(long long action, * well, to ensure the correct value for max_generation_from. */ if (strcasecmp(join_to, controld_globals.our_nodename) != 0) { - member = crm_get_peer(0, controld_globals.our_nodename); + member = pcmk__get_node(0, controld_globals.our_nodename, NULL, + pcmk__node_search_cluster_member); join_make_offer(NULL, member, NULL); } /* This was a genuine join request; cancel any existing transition and * invoke the scheduler. 
*/ - abort_transition(INFINITY, pcmk__graph_restart, "Node join", NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node join", + NULL); count = crmd_join_phase_count(crm_join_welcomed); crm_info("Waiting on join-%d requests from %d outstanding node%s", @@ -386,19 +387,20 @@ do_dc_join_filter_offer(long long action, gboolean ack_nack_bool = TRUE; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); - const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); - const char *ref = crm_element_value(join_ack->msg, F_CRM_REFERENCE); + const char *join_from = crm_element_value(join_ack->msg, PCMK__XA_SRC); + const char *ref = crm_element_value(join_ack->msg, PCMK_XA_REFERENCE); const char *join_version = crm_element_value(join_ack->msg, - XML_ATTR_CRM_VERSION); + PCMK_XA_CRM_FEATURE_SET); crm_node_t *join_node = NULL; if (join_from == NULL) { crm_err("Ignoring invalid join request without node name"); return; } - join_node = crm_get_peer(0, join_from); + join_node = pcmk__get_node(0, join_from, NULL, + pcmk__node_search_cluster_member); - crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id); + crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id); if (join_id != current_join_id) { crm_debug("Ignoring join-%d request from %s because we are on join-%d", join_id, join_from, current_join_id); @@ -411,13 +413,22 @@ do_dc_join_filter_offer(long long action, int lpc = 0; const char *attributes[] = { - XML_ATTR_GENERATION_ADMIN, - XML_ATTR_GENERATION, - XML_ATTR_NUMUPDATES, + PCMK_XA_ADMIN_EPOCH, + PCMK_XA_EPOCH, + PCMK_XA_NUM_UPDATES, }; - for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) { - cmp = compare_int_fields(max_generation_xml, generation, attributes[lpc]); + /* It's not obvious that join_ack->xml is the PCMK__XE_GENERATION_TUPLE + * element from the join client. The "if" guard is for clarity. + */ + if (pcmk__xe_is(generation, PCMK__XE_GENERATION_TUPLE)) { + for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) { + cmp = compare_int_fields(max_generation_xml, generation, + attributes[lpc]); + } + + } else { // Should always be PCMK__XE_GENERATION_TUPLE + CRM_LOG_ASSERT(false); } } @@ -431,7 +442,7 @@ do_dc_join_filter_offer(long long action, join_id, join_from, value, ref); ack_nack_bool = FALSE; - } else if (!crm_is_peer_active(join_node)) { + } else if (!pcmk__cluster_is_node_active(join_node)) { if (match_down_event(join_from) != NULL) { /* The join request was received after the node was fenced or * otherwise shutdown in a way that we're aware of. 
No need to log @@ -463,20 +474,20 @@ do_dc_join_filter_offer(long long action, } else if (max_generation_xml == NULL) { const char *validation = crm_element_value(generation, - XML_ATTR_VALIDATION); + PCMK_XA_VALIDATE_WITH); - if (get_schema_version(validation) < 0) { + if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with first CIB " "generation) due to unknown schema version %s " CRM_XS " ref=%s", - join_id, join_from, validation, ref); + join_id, join_from, pcmk__s(validation, "(missing)"), ref); ack_nack_bool = FALSE; } else { crm_debug("Accepting join-%d request from %s (with first CIB " "generation) " CRM_XS " ref=%s", join_id, join_from, ref); - max_generation_xml = copy_xml(generation); + max_generation_xml = pcmk__xml_copy(NULL, generation); pcmk__str_update(&max_generation_from, join_from); } @@ -485,13 +496,14 @@ do_dc_join_filter_offer(long long action, && pcmk__str_eq(join_from, controld_globals.our_nodename, pcmk__str_casei))) { const char *validation = crm_element_value(generation, - XML_ATTR_VALIDATION); + PCMK_XA_VALIDATE_WITH); - if (get_schema_version(validation) < 0) { + if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with better CIB " "generation than current best from %s) due to unknown " "schema version %s " CRM_XS " ref=%s", - join_id, join_from, max_generation_from, validation, ref); + join_id, join_from, max_generation_from, + pcmk__s(validation, "(missing)"), ref); ack_nack_bool = FALSE; } else { @@ -502,7 +514,7 @@ do_dc_join_filter_offer(long long action, crm_log_xml_debug(generation, "New max generation"); free_xml(max_generation_xml); - max_generation_xml = copy_xml(join_ack->xml); + max_generation_xml = pcmk__xml_copy(NULL, join_ack->xml); pcmk__str_update(&max_generation_from, join_from); } @@ -588,7 +600,7 @@ do_dc_join_finalize(long long action, if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { // Send our CIB out to everyone - pcmk__str_update(&sync_from, controld_globals.our_nodename); + sync_from = pcmk__str_copy(controld_globals.our_nodename); crm_debug("Finalizing join-%d for %d node%s (sync'ing from local CIB)", current_join_id, count_finalizable, pcmk__plural_s(count_finalizable)); @@ -596,7 +608,7 @@ do_dc_join_finalize(long long action, } else { // Ask for the agreed best CIB - pcmk__str_update(&sync_from, max_generation_from); + sync_from = pcmk__str_copy(max_generation_from); crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB from %s)", current_join_id, count_finalizable, pcmk__plural_s(count_finalizable), sync_from); @@ -698,8 +710,8 @@ do_dc_join_ack(long long action, int join_id = -1; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); - const char *op = crm_element_value(join_ack->msg, F_CRM_TASK); - char *join_from = crm_element_value_copy(join_ack->msg, F_CRM_HOST_FROM); + const char *op = crm_element_value(join_ack->msg, PCMK__XA_CRM_TASK); + char *join_from = crm_element_value_copy(join_ack->msg, PCMK__XA_SRC); crm_node_t *peer = NULL; enum controld_section_e section = controld_section_lrm; @@ -726,13 +738,13 @@ do_dc_join_ack(long long action, goto done; } - if (crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id) != 0) { + if (crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id) != 0) { crm_warn("Ignoring join confirmation from %s without valid join ID", join_from); goto done; } - peer = crm_get_peer(0, join_from); + peer = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster_member); if 
(peer->join != crm_join_finalized) { crm_info("Ignoring out-of-sequence join-%d confirmation from %s " "(currently %s not %s)", @@ -800,7 +812,7 @@ do_dc_join_ack(long long action, join_from, current_join_id); } - rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, state, + rc = cib->cmds->modify(cib, PCMK_XE_STATUS, state, cib_scope_local|cib_can_create|cib_transaction); free_xml(execd_state); if (rc != pcmk_ok) { @@ -854,10 +866,10 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) * weren't known before */ crm_trace("Updating node name and UUID in CIB for %s", join_to); - tmp1 = create_xml_node(NULL, XML_CIB_TAG_NODE); - crm_xml_add(tmp1, XML_ATTR_ID, crm_peer_uuid(join_node)); - crm_xml_add(tmp1, XML_ATTR_UNAME, join_to); - fsa_cib_anon_update(XML_CIB_TAG_NODES, tmp1); + tmp1 = pcmk__xe_create(NULL, PCMK_XE_NODE); + crm_xml_add(tmp1, PCMK_XA_ID, pcmk__cluster_node_uuid(join_node)); + crm_xml_add(tmp1, PCMK_XA_UNAME, join_to); + fsa_cib_anon_update(PCMK_XE_NODES, tmp1); free_xml(tmp1); if (join_node->join == crm_join_nack_quiet) { @@ -866,8 +878,9 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) return; } - join_node = crm_get_peer(0, join_to); - if (!crm_is_peer_active(join_node)) { + join_node = pcmk__get_node(0, join_to, NULL, + pcmk__node_search_cluster_member); + if (!pcmk__cluster_is_node_active(join_node)) { /* * NACK'ing nodes that the membership layer doesn't know about yet * simply creates more churn @@ -896,10 +909,10 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) * node hosts each to the ACK message. This keeps new controllers in * sync with what has already happened. */ - if (crm_remote_peer_cache_size() != 0) { + if (pcmk__cluster_num_remote_nodes() > 0) { GHashTableIter iter; crm_node_t *node = NULL; - xmlNode *remotes = create_xml_node(acknak, XML_CIB_TAG_NODES); + xmlNode *remotes = pcmk__xe_create(acknak, PCMK_XE_NODES); g_hash_table_iter_init(&iter, crm_remote_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { @@ -909,16 +922,16 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data) continue; } - remote = create_xml_node(remotes, XML_CIB_TAG_NODE); + remote = pcmk__xe_create(remotes, PCMK_XE_NODE); pcmk__xe_set_props(remote, - XML_ATTR_ID, node->uname, - XML_CIB_TAG_STATE, node->state, - PCMK__XA_CONN_HOST, node->conn_host, + PCMK_XA_ID, node->uname, + PCMK__XA_NODE_STATE, node->state, + PCMK__XA_CONNECTION_HOST, node->conn_host, NULL); } } } - send_cluster_message(join_node, crm_msg_crmd, acknak, TRUE); + pcmk__cluster_send_message(join_node, crm_msg_crmd, acknak); free_xml(acknak); return; } diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c index f25d1e9..1079d6a 100644 --- a/daemons/controld/controld_membership.c +++ b/daemons/controld/controld_membership.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -29,7 +28,7 @@ reap_dead_nodes(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; - if (crm_is_peer_active(node) == FALSE) { + if (!pcmk__cluster_is_node_active(node)) { crm_update_peer_join(__func__, node, crm_join_none); if(node && node->uname) { @@ -85,7 +84,7 @@ post_cache_update(int instance) */ no_op = create_request(CRM_OP_NOOP, NULL, NULL, CRM_SYSTEM_CRMD, AM_I_DC ? CRM_SYSTEM_DC : CRM_SYSTEM_CRMD, NULL); - send_cluster_message(NULL, crm_msg_crmd, no_op, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_crmd, no_op); free_xml(no_op); } @@ -132,19 +131,20 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, return NULL; } - node_state = create_xml_node(parent, XML_CIB_TAG_STATE); + node_state = pcmk__xe_create(parent, PCMK__XE_NODE_STATE); if (pcmk_is_set(node->flags, crm_remote_node)) { - pcmk__xe_set_bool_attr(node_state, XML_NODE_IS_REMOTE, true); + pcmk__xe_set_bool_attr(node_state, PCMK_XA_REMOTE_NODE, true); } - if (crm_xml_add(node_state, XML_ATTR_ID, crm_peer_uuid(node)) == NULL) { + if (crm_xml_add(node_state, PCMK_XA_ID, + pcmk__cluster_node_uuid(node)) == NULL) { crm_info("Node update for %s cancelled: no ID", node->uname); free_xml(node_state); return NULL; } - crm_xml_add(node_state, XML_ATTR_UNAME, node->uname); + crm_xml_add(node_state, PCMK_XA_UNAME, node->uname); if ((flags & node_update_cluster) && node->state) { if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { @@ -162,15 +162,15 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, if (flags & node_update_peer) { if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { // A value 0 means the peer is offline in CPG. 
- crm_xml_add_ll(node_state, PCMK__XA_CRMD, node->when_online); + crm_xml_add_ll(node_state, PCMK_XA_CRMD, node->when_online); } else { // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp - value = OFFLINESTATUS; + value = PCMK_VALUE_OFFLINE; if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { - value = ONLINESTATUS; + value = PCMK_VALUE_ONLINE; } - crm_xml_add(node_state, PCMK__XA_CRMD, value); + crm_xml_add(node_state, PCMK_XA_CRMD, value); } } @@ -184,11 +184,11 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent, } if (flags & node_update_expected) { - crm_xml_add(node_state, PCMK__XA_EXPECTED, node->expected); + crm_xml_add(node_state, PCMK_XA_EXPECTED, node->expected); } } - crm_xml_add(node_state, XML_ATTR_ORIGIN, source); + crm_xml_add(node_state, PCMK_XA_CRM_DEBUG_ORIGIN, source); return node_state; } @@ -222,26 +222,22 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, return; } - if (pcmk__xe_is(output, XML_CIB_TAG_NODE)) { + if (pcmk__xe_is(output, PCMK_XE_NODE)) { node_xml = output; } else { - node_xml = pcmk__xml_first_child(output); + node_xml = pcmk__xe_first_child(output, PCMK_XE_NODE, NULL, NULL); } - for (; node_xml != NULL; node_xml = pcmk__xml_next(node_xml)) { + for (; node_xml != NULL; node_xml = pcmk__xe_next_same(node_xml)) { const char *node_uuid = NULL; const char *node_uname = NULL; GHashTableIter iter; crm_node_t *node = NULL; gboolean known = FALSE; - if (!pcmk__xe_is(node_xml, XML_CIB_TAG_NODE)) { - continue; - } - - node_uuid = crm_element_value(node_xml, XML_ATTR_ID); - node_uname = crm_element_value(node_xml, XML_ATTR_UNAME); + node_uuid = crm_element_value(node_xml, PCMK_XA_ID); + node_uname = crm_element_value(node_xml, PCMK_XA_UNAME); if (node_uuid == NULL || node_uname == NULL) { continue; @@ -267,20 +263,19 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, crm_notice("Deleting unknown node %s/%s which has conflicting uname with %s", node_uuid, node_uname, new_node_uuid); - delete_call_id = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_NODES, + delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_NODES, node_xml, cib_scope_local); - fsa_register_cib_callback(delete_call_id, strdup(node_uuid), + fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid), remove_conflicting_node_callback); - node_state_xml = create_xml_node(NULL, XML_CIB_TAG_STATE); - crm_xml_add(node_state_xml, XML_ATTR_ID, node_uuid); - crm_xml_add(node_state_xml, XML_ATTR_UNAME, node_uname); + node_state_xml = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); + crm_xml_add(node_state_xml, PCMK_XA_ID, node_uuid); + crm_xml_add(node_state_xml, PCMK_XA_UNAME, node_uname); - delete_call_id = cib_conn->cmds->remove(cib_conn, - XML_CIB_TAG_STATUS, + delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_STATUS, node_state_xml, cib_scope_local); - fsa_register_cib_callback(delete_call_id, strdup(node_uuid), + fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid), remove_conflicting_node_callback); free_xml(node_state_xml); } @@ -311,10 +306,12 @@ populate_cib_nodes(enum node_update_flags flags, const char *source) int call_id = 0; gboolean from_hashtable = TRUE; - xmlNode *node_list = create_xml_node(NULL, XML_CIB_TAG_NODES); + xmlNode *node_list = pcmk__xe_create(NULL, PCMK_XE_NODES); #if SUPPORT_COROSYNC - if (!pcmk_is_set(flags, node_update_quick) && is_corosync_cluster()) { + if (!pcmk_is_set(flags, node_update_quick) + && (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync)) 
{ + from_hashtable = pcmk__corosync_add_nodes(node_list); } #endif @@ -337,22 +334,22 @@ populate_cib_nodes(enum node_update_flags flags, const char *source) } /* We need both to be valid */ - new_node = create_xml_node(node_list, XML_CIB_TAG_NODE); - crm_xml_add(new_node, XML_ATTR_ID, node->uuid); - crm_xml_add(new_node, XML_ATTR_UNAME, node->uname); + new_node = pcmk__xe_create(node_list, PCMK_XE_NODE); + crm_xml_add(new_node, PCMK_XA_ID, node->uuid); + crm_xml_add(new_node, PCMK_XA_UNAME, node->uname); /* Search and remove unknown nodes with the conflicting uname from CIB */ pcmk__g_strcat(xpath, - "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION - "/" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE - "[@" XML_ATTR_UNAME "='", node->uname, "']" - "[@" XML_ATTR_ID "!='", node->uuid, "']", NULL); + "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION + "/" PCMK_XE_NODES "/" PCMK_XE_NODE + "[@" PCMK_XA_UNAME "='", node->uname, "']" + "[@" PCMK_XA_ID "!='", node->uuid, "']", NULL); call_id = cib_conn->cmds->query(cib_conn, (const char *) xpath->str, NULL, cib_scope_local|cib_xpath); - fsa_register_cib_callback(call_id, strdup(node->uuid), + fsa_register_cib_callback(call_id, pcmk__str_copy(node->uuid), search_conflicting_node_callback); } } @@ -364,7 +361,7 @@ populate_cib_nodes(enum node_update_flags flags, const char *source) crm_trace("Populating section from %s", from_hashtable ? "hashtable" : "cluster"); - if ((controld_update_cib(XML_CIB_TAG_NODES, node_list, cib_scope_local, + if ((controld_update_cib(PCMK_XE_NODES, node_list, cib_scope_local, node_list_update_callback) == pcmk_rc_ok) && (crm_peer_cache != NULL) && AM_I_DC) { /* @@ -375,7 +372,7 @@ populate_cib_nodes(enum node_update_flags flags, const char *source) crm_node_t *node = NULL; free_xml(node_list); - node_list = create_xml_node(NULL, XML_CIB_TAG_STATUS); + node_list = pcmk__xe_create(NULL, PCMK_XE_STATUS); g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { @@ -389,7 +386,7 @@ populate_cib_nodes(enum node_update_flags flags, const char *source) } } - controld_update_cib(XML_CIB_TAG_STATUS, node_list, cib_scope_local, + controld_update_cib(PCMK_XE_STATUS, node_list, cib_scope_local, crmd_node_update_complete); } free_xml(node_list); @@ -429,12 +426,12 @@ crm_update_quorum(gboolean quorum, gboolean force_update) || force_update)) { xmlNode *update = NULL; - update = create_xml_node(NULL, XML_TAG_CIB); - crm_xml_add_int(update, XML_ATTR_HAVE_QUORUM, quorum); - crm_xml_add(update, XML_ATTR_DC_UUID, controld_globals.our_uuid); + update = pcmk__xe_create(NULL, PCMK_XE_CIB); + crm_xml_add_int(update, PCMK_XA_HAVE_QUORUM, quorum); + crm_xml_add(update, PCMK_XA_DC_UUID, controld_globals.our_uuid); crm_debug("Updating quorum status to %s", pcmk__btoa(quorum)); - controld_update_cib(XML_TAG_CIB, update, cib_scope_local, + controld_update_cib(PCMK_XE_CIB, update, cib_scope_local, cib_quorum_update_complete); free_xml(update); @@ -453,11 +450,11 @@ crm_update_quorum(gboolean quorum, gboolean force_update) * nodes are joining around the same time, so the one that brings us * to quorum doesn't cause all the remaining ones to be fenced. 
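 * (Only the score constant changes in the calls below: the bare INFINITY
 * macro gives way to PCMK_SCORE_INFINITY, as at every other
 * abort_transition() and abort_after_delay() call site in this patch,
 * while the 5000ms grace period is kept as is.)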
*/ - abort_after_delay(INFINITY, pcmk__graph_restart, "Quorum gained", - 5000); + abort_after_delay(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Quorum gained", 5000); } else { - abort_transition(INFINITY, pcmk__graph_restart, "Quorum lost", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Quorum lost", NULL); } } diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c index 39f3c7a..5f7a78c 100644 --- a/daemons/controld/controld_messages.c +++ b/daemons/controld/controld_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -22,8 +21,6 @@ #include -extern void crm_shutdown(int nsig); - static enum crmd_fsa_input handle_message(xmlNode *msg, enum crmd_fsa_cause cause); static void handle_response(xmlNode *stored_msg); @@ -102,7 +99,7 @@ register_fsa_input_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input, fsa_input2string(input), fsa_cause2string(cause), (data? "with" : "without")); - fsa_data = calloc(1, sizeof(fsa_data_t)); + fsa_data = pcmk__assert_alloc(1, sizeof(fsa_data_t)); fsa_data->id = last_data_id; fsa_data->fsa_input = input; fsa_data->fsa_cause = cause; @@ -191,11 +188,14 @@ fsa_dump_queue(int log_level) ha_msg_input_t * copy_ha_msg_input(ha_msg_input_t * orig) { - ha_msg_input_t *copy = calloc(1, sizeof(ha_msg_input_t)); + xmlNode *wrapper = NULL; + + ha_msg_input_t *copy = pcmk__assert_alloc(1, sizeof(ha_msg_input_t)); + + copy->msg = (orig != NULL)? pcmk__xml_copy(NULL, orig->msg) : NULL; - CRM_ASSERT(copy != NULL); - copy->msg = (orig && orig->msg)? 
copy_xml(orig->msg) : NULL; - copy->xml = get_message_xml(copy->msg, F_CRM_DATA); + wrapper = pcmk__xe_first_child(copy->msg, PCMK__XE_CRM_XML, NULL, NULL); + copy->xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); return copy; } @@ -328,7 +328,7 @@ route_message(enum crmd_fsa_cause cause, xmlNode * input) gboolean relay_message(xmlNode * msg, gboolean originated_locally) { - enum crm_ais_msg_types dest = crm_msg_ais; + enum crm_ais_msg_types dest = crm_msg_none; bool is_for_dc = false; bool is_for_dcib = false; bool is_for_te = false; @@ -346,12 +346,12 @@ relay_message(xmlNode * msg, gboolean originated_locally) CRM_CHECK(msg != NULL, return TRUE); - host_to = crm_element_value(msg, F_CRM_HOST_TO); - sys_to = crm_element_value(msg, F_CRM_SYS_TO); - sys_from = crm_element_value(msg, F_CRM_SYS_FROM); - type = crm_element_value(msg, F_TYPE); - task = crm_element_value(msg, F_CRM_TASK); - ref = crm_element_value(msg, XML_ATTR_REFERENCE); + host_to = crm_element_value(msg, PCMK__XA_CRM_HOST_TO); + sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); + sys_from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); + type = crm_element_value(msg, PCMK__XA_T); + task = crm_element_value(msg, PCMK__XA_CRM_TASK); + ref = crm_element_value(msg, PCMK_XA_REFERENCE); broadcast = pcmk__str_empty(host_to); @@ -367,8 +367,9 @@ relay_message(xmlNode * msg, gboolean originated_locally) } // Require message type (set by create_request()) - if (!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) { - crm_warn("Ignoring invalid message %s with type '%s' (not '" T_CRM "')", + if (!pcmk__str_eq(type, PCMK__VALUE_CRMD, pcmk__str_none)) { + crm_warn("Ignoring invalid message %s with type '%s' " + "(not '" PCMK__VALUE_CRMD "')", ref, pcmk__s(type, "")); crm_log_xml_trace(msg, "ignored"); return TRUE; @@ -376,15 +377,16 @@ relay_message(xmlNode * msg, gboolean originated_locally) // Require a destination subsystem (also set by create_request()) if (sys_to == NULL) { - crm_warn("Ignoring invalid message %s with no " F_CRM_SYS_TO, ref); + crm_warn("Ignoring invalid message %s with no " PCMK__XA_CRM_SYS_TO, + ref); crm_log_xml_trace(msg, "ignored"); return TRUE; } // Get the message type appropriate to the destination subsystem - if (is_corosync_cluster()) { - dest = text2msg_type(sys_to); - if ((dest < crm_msg_ais) || (dest > crm_msg_stonith_ng)) { + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { + dest = pcmk__cluster_parse_msg_type(sys_to); + if (dest == crm_msg_none) { /* Unrecognized value, use a sane default * * @TODO Maybe we should bail instead @@ -427,10 +429,12 @@ relay_message(xmlNode * msg, gboolean originated_locally) is_local = true; } else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { - xmlNode *msg_data = get_message_xml(msg, F_CRM_DATA); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, + NULL); + xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *mode = crm_element_value(msg_data, PCMK__XA_MODE); - if (pcmk__str_eq(mode, XML_TAG_CIB, pcmk__str_casei)) { + if (pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) { // Local delete of an offline node's resource history is_local = true; } @@ -458,9 +462,10 @@ relay_message(xmlNode * msg, gboolean originated_locally) ref, pcmk__s(host_to, "broadcast")); crm_log_xml_trace(msg, "relayed"); if (!broadcast) { - node_to = crm_get_peer(0, host_to); + node_to = pcmk__get_node(0, host_to, NULL, + pcmk__node_search_cluster_member); } - send_cluster_message(node_to, dest, msg, 
TRUE); + pcmk__cluster_send_message(node_to, dest, msg); return TRUE; } @@ -484,7 +489,8 @@ relay_message(xmlNode * msg, gboolean originated_locally) } if (!broadcast) { - node_to = pcmk__search_cluster_node_cache(0, host_to, NULL); + node_to = pcmk__search_node_caches(0, host_to, + pcmk__node_search_cluster_member); if (node_to == NULL) { crm_warn("Ignoring message %s because node %s is unknown", ref, host_to); @@ -496,7 +502,7 @@ relay_message(xmlNode * msg, gboolean originated_locally) crm_trace("Relay message %s to %s", ref, pcmk__s(host_to, "all peers")); crm_log_xml_trace(msg, "relayed"); - send_cluster_message(node_to, dest, msg, TRUE); + pcmk__cluster_send_message(node_to, dest, msg); return TRUE; } @@ -539,10 +545,11 @@ bool controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_client, const char *proxy_session) { + xmlNode *wrapper = NULL; xmlNode *message_data = NULL; const char *client_name = NULL; - const char *op = crm_element_value(client_msg, F_CRM_TASK); - const char *ref = crm_element_value(client_msg, XML_ATTR_REFERENCE); + const char *op = crm_element_value(client_msg, PCMK__XA_CRM_TASK); + const char *ref = crm_element_value(client_msg, PCMK_XA_REFERENCE); const char *uuid = (curr_client? curr_client->id : proxy_session); if (uuid == NULL) { @@ -556,27 +563,28 @@ controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_c return true; } - message_data = get_message_xml(client_msg, F_CRM_DATA); + wrapper = pcmk__xe_first_child(client_msg, PCMK__XE_CRM_XML, NULL, NULL); + message_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); - client_name = crm_element_value(message_data, "client_name"); + client_name = crm_element_value(message_data, PCMK__XA_CLIENT_NAME); if (pcmk__str_empty(client_name)) { crm_warn("IPC hello from client rejected: No client name", CRM_XS " ref=%s uuid=%s", (ref? 
ref : "none"), uuid); goto rejected; } - if (!authorize_version(message_data, "major_version", client_name, ref, - uuid)) { + if (!authorize_version(message_data, PCMK__XA_MAJOR_VERSION, client_name, + ref, uuid)) { goto rejected; } - if (!authorize_version(message_data, "minor_version", client_name, ref, - uuid)) { + if (!authorize_version(message_data, PCMK__XA_MINOR_VERSION, client_name, + ref, uuid)) { goto rejected; } crm_trace("Validated IPC hello from client %s", client_name); crm_log_xml_trace(client_msg, "hello"); if (curr_client) { - curr_client->userdata = strdup(client_name); + curr_client->userdata = pcmk__str_copy(client_name); } controld_trigger_fsa(); return false; @@ -596,16 +604,17 @@ handle_message(xmlNode *msg, enum crmd_fsa_cause cause) CRM_CHECK(msg != NULL, return I_NULL); - type = crm_element_value(msg, F_CRM_MSG_TYPE); - if (pcmk__str_eq(type, XML_ATTR_REQUEST, pcmk__str_none)) { + type = crm_element_value(msg, PCMK__XA_SUBT); + if (pcmk__str_eq(type, PCMK__VALUE_REQUEST, pcmk__str_none)) { return handle_request(msg, cause); + } - } else if (pcmk__str_eq(type, XML_ATTR_RESPONSE, pcmk__str_none)) { + if (pcmk__str_eq(type, PCMK__VALUE_RESPONSE, pcmk__str_none)) { handle_response(msg); return I_NULL; } - crm_warn("Ignoring message with unknown " F_CRM_MSG_TYPE " '%s'", + crm_warn("Ignoring message with unknown " PCMK__XA_SUBT" '%s'", pcmk__s(type, "")); crm_log_xml_trace(msg, "bad"); return I_NULL; @@ -620,31 +629,36 @@ handle_failcount_op(xmlNode * stored_msg) char *interval_spec = NULL; guint interval_ms = 0; gboolean is_remote_node = FALSE; - xmlNode *xml_op = get_message_xml(stored_msg, F_CRM_DATA); + + xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL, + NULL); + xmlNode *xml_op = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (xml_op) { - xmlNode *xml_rsc = first_named_child(xml_op, XML_CIB_TAG_RESOURCE); - xmlNode *xml_attrs = first_named_child(xml_op, XML_TAG_ATTRS); + xmlNode *xml_rsc = pcmk__xe_first_child(xml_op, PCMK_XE_PRIMITIVE, NULL, + NULL); + xmlNode *xml_attrs = pcmk__xe_first_child(xml_op, PCMK__XE_ATTRIBUTES, + NULL, NULL); if (xml_rsc) { - rsc = ID(xml_rsc); + rsc = pcmk__xe_id(xml_rsc); } if (xml_attrs) { op = crm_element_value(xml_attrs, - CRM_META "_" XML_RSC_ATTR_CLEAR_OP); + CRM_META "_" PCMK__META_CLEAR_FAILURE_OP); crm_element_value_ms(xml_attrs, - CRM_META "_" XML_RSC_ATTR_CLEAR_INTERVAL, + CRM_META "_" PCMK__META_CLEAR_FAILURE_INTERVAL, &interval_ms); } } - uname = crm_element_value(xml_op, XML_LRM_ATTR_TARGET); + uname = crm_element_value(xml_op, PCMK__META_ON_NODE); if ((rsc == NULL) || (uname == NULL)) { crm_log_xml_warn(stored_msg, "invalid failcount op"); return I_NULL; } - if (crm_element_value(xml_op, XML_LRM_ATTR_ROUTER_NODE)) { + if (crm_element_value(xml_op, PCMK__XA_ROUTER_NODE)) { is_remote_node = TRUE; } @@ -669,7 +683,9 @@ static enum crmd_fsa_input handle_lrm_delete(xmlNode *stored_msg) { const char *mode = NULL; - xmlNode *msg_data = get_message_xml(stored_msg, F_CRM_DATA); + xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL, + NULL); + xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_CHECK(msg_data != NULL, return I_NULL); @@ -679,14 +695,14 @@ handle_lrm_delete(xmlNode *stored_msg) * CIB, and do some bookkeeping in the controller. * * However, if the affected node is offline, the client will specify - * mode="cib" which means the controller receiving the operation should - * clear the resource's history from the CIB and nothing else. 
This is used - * to clear shutdown locks. + * mode=PCMK__VALUE_CIB which means the controller receiving the operation + * should clear the resource's history from the CIB and nothing else. This + * is used to clear shutdown locks. */ mode = crm_element_value(msg_data, PCMK__XA_MODE); - if ((mode == NULL) || strcmp(mode, XML_TAG_CIB)) { + if (!pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) { // Relay to affected node - crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD); + crm_xml_add(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD); return I_ROUTER; } else { @@ -698,13 +714,13 @@ handle_lrm_delete(xmlNode *stored_msg) xmlNode *rsc_xml = NULL; int rc = pcmk_rc_ok; - rsc_xml = first_named_child(msg_data, XML_CIB_TAG_RESOURCE); + rsc_xml = pcmk__xe_first_child(msg_data, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(rsc_xml != NULL, return I_NULL); - rsc_id = ID(rsc_xml); - from_sys = crm_element_value(stored_msg, F_CRM_SYS_FROM); - node = crm_element_value(msg_data, XML_LRM_ATTR_TARGET); - user_name = pcmk__update_acl_user(stored_msg, F_CRM_USER, NULL); + rsc_id = pcmk__xe_id(rsc_xml); + from_sys = crm_element_value(stored_msg, PCMK__XA_CRM_SYS_FROM); + node = crm_element_value(msg_data, PCMK__META_ON_NODE); + user_name = pcmk__update_acl_user(stored_msg, PCMK__XA_CRM_USER, NULL); crm_debug("Handling " CRM_OP_LRM_DELETE " for %s on %s locally%s%s " "(clearing CIB resource history only)", rsc_id, node, (user_name? " for user " : ""), (user_name? user_name : "")); @@ -715,19 +731,20 @@ handle_lrm_delete(xmlNode *stored_msg) crmd_cib_smart_opt()); } - //Notify client and tengine.(Only notify tengine if mode = "cib" and CRM_OP_LRM_DELETE.) + /* Notify client. Also notify tengine if mode=PCMK__VALUE_CIB and + * op=CRM_OP_LRM_DELETE. + */ if (from_sys) { lrmd_event_data_t *op = NULL; - const char *from_host = crm_element_value(stored_msg, - F_CRM_HOST_FROM); + const char *from_host = crm_element_value(stored_msg, PCMK__XA_SRC); const char *transition; if (strcmp(from_sys, CRM_SYSTEM_TENGINE)) { transition = crm_element_value(msg_data, - XML_ATTR_TRANSITION_KEY); + PCMK__XA_TRANSITION_KEY); } else { transition = crm_element_value(stored_msg, - XML_ATTR_TRANSITION_KEY); + PCMK__XA_TRANSITION_KEY); } crm_info("Notifying %s on %s that %s was%s deleted", @@ -735,10 +752,10 @@ handle_lrm_delete(xmlNode *stored_msg) ((rc == pcmk_rc_ok)? "" : " not")); op = lrmd_new_event(rsc_id, PCMK_ACTION_DELETE, 0); op->type = lrmd_event_exec_complete; - op->user_data = strdup(transition? transition : FAKE_TE_ID); + op->user_data = pcmk__str_copy(pcmk__s(transition, FAKE_TE_ID)); op->params = pcmk__strkey_table(free, free); - g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), - strdup(CRM_FEATURE_SET)); + pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, + CRM_FEATURE_SET); controld_rc2event(op, rc); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); @@ -759,7 +776,7 @@ static enum crmd_fsa_input handle_remote_state(const xmlNode *msg) { const char *conn_host = NULL; - const char *remote_uname = ID(msg); + const char *remote_uname = pcmk__xe_id(msg); crm_node_t *remote_peer; bool remote_is_up = false; int rc = pcmk_rc_ok; @@ -768,14 +785,14 @@ handle_remote_state(const xmlNode *msg) CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL); - remote_peer = crm_remote_peer_get(remote_uname); + remote_peer = pcmk__cluster_lookup_remote_node(remote_uname); CRM_CHECK(remote_peer, return I_NULL); pcmk__update_peer_state(__func__, remote_peer, remote_is_up ? 
CRM_NODE_MEMBER : CRM_NODE_LOST, 0); - conn_host = crm_element_value(msg, PCMK__XA_CONN_HOST); + conn_host = crm_element_value(msg, PCMK__XA_CONNECTION_HOST); if (conn_host) { pcmk__str_update(&remote_peer->conn_host, conn_host); } else if (remote_peer->conn_host) { @@ -802,18 +819,18 @@ handle_ping(const xmlNode *msg) // Build reply - ping = create_xml_node(NULL, XML_CRM_TAG_PING); - value = crm_element_value(msg, F_CRM_SYS_TO); - crm_xml_add(ping, XML_PING_ATTR_SYSFROM, value); + ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); + value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); + crm_xml_add(ping, PCMK__XA_CRM_SUBSYSTEM, value); // Add controller state value = fsa_state2string(controld_globals.fsa_state); - crm_xml_add(ping, XML_PING_ATTR_CRMDSTATE, value); + crm_xml_add(ping, PCMK__XA_CRMD_STATE, value); crm_notice("Current ping state: %s", value); // CTS needs this // Add controller health // @TODO maybe do some checks to determine meaningful status - crm_xml_add(ping, XML_PING_ATTR_STATUS, "ok"); + crm_xml_add(ping, PCMK_XA_RESULT, "ok"); // Send reply reply = create_reply(msg, ping); @@ -843,13 +860,13 @@ handle_node_list(const xmlNode *request) xmlNode *reply_data = NULL; // Create message data for reply - reply_data = create_xml_node(NULL, XML_CIB_TAG_NODES); + reply_data = pcmk__xe_create(NULL, PCMK_XE_NODES); g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { - xmlNode *xml = create_xml_node(reply_data, XML_CIB_TAG_NODE); + xmlNode *xml = pcmk__xe_create(reply_data, PCMK_XE_NODE); - crm_xml_add_ll(xml, XML_ATTR_ID, (long long) node->id); // uint32_t - crm_xml_add(xml, XML_ATTR_UNAME, node->uname); + crm_xml_add_ll(xml, PCMK_XA_ID, (long long) node->id); // uint32_t + crm_xml_add(xml, PCMK_XA_UNAME, node->uname); crm_xml_add(xml, PCMK__XA_IN_CCM, node->state); } @@ -883,32 +900,32 @@ handle_node_info_request(const xmlNode *msg) // Build reply - reply_data = create_xml_node(NULL, XML_CIB_TAG_NODE); - crm_xml_add(reply_data, XML_PING_ATTR_SYSFROM, CRM_SYSTEM_CRMD); + reply_data = pcmk__xe_create(NULL, PCMK_XE_NODE); + crm_xml_add(reply_data, PCMK__XA_CRM_SUBSYSTEM, CRM_SYSTEM_CRMD); // Add whether current partition has quorum - pcmk__xe_set_bool_attr(reply_data, XML_ATTR_HAVE_QUORUM, + pcmk__xe_set_bool_attr(reply_data, PCMK_XA_HAVE_QUORUM, pcmk_is_set(controld_globals.flags, controld_has_quorum)); // Check whether client requested node info by ID and/or name - crm_element_value_int(msg, XML_ATTR_ID, &node_id); + crm_element_value_int(msg, PCMK_XA_ID, &node_id); if (node_id < 0) { node_id = 0; } - value = crm_element_value(msg, XML_ATTR_UNAME); + value = crm_element_value(msg, PCMK_XA_UNAME); // Default to local node if none given if ((node_id == 0) && (value == NULL)) { value = controld_globals.our_nodename; } - node = pcmk__search_node_caches(node_id, value, CRM_GET_PEER_ANY); + node = pcmk__search_node_caches(node_id, value, pcmk__node_search_any); if (node) { - crm_xml_add(reply_data, XML_ATTR_ID, node->uuid); - crm_xml_add(reply_data, XML_ATTR_UNAME, node->uname); - crm_xml_add(reply_data, PCMK__XA_CRMD, node->state); - pcmk__xe_set_bool_attr(reply_data, XML_NODE_IS_REMOTE, + crm_xml_add(reply_data, PCMK_XA_ID, node->uuid); + crm_xml_add(reply_data, PCMK_XA_UNAME, node->uname); + crm_xml_add(reply_data, PCMK_XA_CRMD, node->state); + pcmk__xe_set_bool_attr(reply_data, PCMK_XA_REMOTE_NODE, pcmk_is_set(node->flags, crm_remote_node)); } @@ -927,7 +944,7 @@ handle_node_info_request(const xmlNode *msg) static 
void verify_feature_set(xmlNode *msg) { - const char *dc_version = crm_element_value(msg, XML_ATTR_CRM_VERSION); + const char *dc_version = crm_element_value(msg, PCMK_XA_CRM_FEATURE_SET); if (dc_version == NULL) { /* All we really know is that the DC feature set is older than 3.1.0, @@ -953,7 +970,7 @@ verify_feature_set(xmlNode *msg) static enum crmd_fsa_input handle_shutdown_self_ack(xmlNode *stored_msg) { - const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); + const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC); if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { // The expected case -- we initiated own shutdown sequence @@ -986,7 +1003,7 @@ handle_shutdown_self_ack(xmlNode *stored_msg) static enum crmd_fsa_input handle_shutdown_ack(xmlNode *stored_msg) { - const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); + const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC); if (host_from == NULL) { crm_warn("Ignoring shutdown request without origin specified"); @@ -1016,19 +1033,20 @@ static enum crmd_fsa_input handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) { xmlNode *msg = NULL; - const char *op = crm_element_value(stored_msg, F_CRM_TASK); + const char *op = crm_element_value(stored_msg, PCMK__XA_CRM_TASK); /* Optimize this for the DC - it has the most to do */ crm_log_xml_trace(stored_msg, "request"); if (op == NULL) { - crm_warn("Ignoring request without " F_CRM_TASK); + crm_warn("Ignoring request without " PCMK__XA_CRM_TASK); return I_NULL; } if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { - const char *from = crm_element_value(stored_msg, F_CRM_HOST_FROM); - crm_node_t *node = pcmk__search_cluster_node_cache(0, from, NULL); + const char *from = crm_element_value(stored_msg, PCMK__XA_SRC); + crm_node_t *node = + pcmk__search_node_caches(0, from, pcmk__node_search_cluster_member); pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN); if(AM_I_DC == FALSE) { @@ -1099,11 +1117,13 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) { verify_feature_set(stored_msg); - crm_debug("Raising I_JOIN_OFFER: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID)); + crm_debug("Raising I_JOIN_OFFER: join-%s", + crm_element_value(stored_msg, PCMK__XA_JOIN_ID)); return I_JOIN_OFFER; } else if (strcmp(op, CRM_OP_JOIN_ACKNAK) == 0) { - crm_debug("Raising I_JOIN_RESULT: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID)); + crm_debug("Raising I_JOIN_RESULT: join-%s", + crm_element_value(stored_msg, PCMK__XA_JOIN_ID)); return I_JOIN_RESULT; } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0) { @@ -1113,18 +1133,12 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) || (strcmp(op, CRM_OP_LRM_REFRESH) == 0) // @COMPAT || (strcmp(op, CRM_OP_REPROBE) == 0)) { - crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD); + crm_xml_add(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD); return I_ROUTER; } else if (strcmp(op, CRM_OP_NOOP) == 0) { return I_NULL; - } else if (strcmp(op, CRM_OP_LOCAL_SHUTDOWN) == 0) { - - crm_shutdown(SIGTERM); - /*return I_SHUTDOWN; */ - return I_NULL; - } else if (strcmp(op, CRM_OP_PING) == 0) { return handle_ping(stored_msg); @@ -1135,12 +1149,12 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) int id = 0; const char *name = NULL; - crm_element_value_int(stored_msg, XML_ATTR_ID, &id); - name = crm_element_value(stored_msg, XML_ATTR_UNAME); + crm_element_value_int(stored_msg, PCMK_XA_ID, &id); + 
name = crm_element_value(stored_msg, PCMK_XA_UNAME); if(cause == C_IPC_MESSAGE) { msg = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); - if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) { + if (!pcmk__cluster_send_message(NULL, crm_msg_crmd, msg)) { crm_err("Could not instruct peers to remove references to node %s/%u", name, id); } else { crm_notice("Instructing peers to remove references to node %s/%u", name, id); @@ -1148,7 +1162,7 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) free_xml(msg); } else { - reap_crm_member(id, name); + pcmk__cluster_forget_cluster_node(id, name); /* If we're forgetting this node, also forget any failures to fence * it, so we don't carry that over to any node added later with the @@ -1158,7 +1172,9 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) } } else if (strcmp(op, CRM_OP_MAINTENANCE_NODES) == 0) { - xmlNode *xml = get_message_xml(stored_msg, F_CRM_DATA); + xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, + NULL, NULL); + xmlNode *xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); remote_ra_process_maintenance_nodes(xml); @@ -1183,15 +1199,15 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) static void handle_response(xmlNode *stored_msg) { - const char *op = crm_element_value(stored_msg, F_CRM_TASK); + const char *op = crm_element_value(stored_msg, PCMK__XA_CRM_TASK); crm_log_xml_trace(stored_msg, "reply"); if (op == NULL) { - crm_warn("Ignoring reply without " F_CRM_TASK); + crm_warn("Ignoring reply without " PCMK__XA_CRM_TASK); } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) { // Check whether the scheduler answer has been superseded by a subsequent request - const char *msg_ref = crm_element_value(stored_msg, XML_ATTR_REFERENCE); + const char *msg_ref = crm_element_value(stored_msg, PCMK_XA_REFERENCE); if (msg_ref == NULL) { crm_err("%s - Ignoring calculation with no reference", op); @@ -1212,7 +1228,7 @@ handle_response(xmlNode *stored_msg) || strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0 || strcmp(op, CRM_OP_SHUTDOWN) == 0) { } else { - const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); + const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC); crm_err("Unexpected response (op=%s, src=%s) sent to the %s", op, host_from, AM_I_DC ?
"DC" : "controller"); @@ -1230,7 +1246,7 @@ handle_shutdown_request(xmlNode * stored_msg) */ char *now_s = NULL; - const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM); + const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC); if (host_from == NULL) { /* we're shutting down and the DC */ @@ -1242,7 +1258,7 @@ handle_shutdown_request(xmlNode * stored_msg) crm_log_xml_trace(stored_msg, "message"); now_s = pcmk__ttoa(time(NULL)); - update_attrd(host_from, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE); + update_attrd(host_from, PCMK__NODE_ATTR_SHUTDOWN, now_s, NULL, FALSE); free(now_s); /* will be picked up by the TE as long as its running */ @@ -1258,8 +1274,8 @@ send_msg_via_ipc(xmlNode * msg, const char *sys) client_channel = pcmk__find_client_by_id(sys); - if (crm_element_value(msg, F_CRM_HOST_FROM) == NULL) { - crm_xml_add(msg, F_CRM_HOST_FROM, controld_globals.our_nodename); + if (crm_element_value(msg, PCMK__XA_SRC) == NULL) { + crm_xml_add(msg, PCMK__XA_SRC, controld_globals.our_nodename); } if (client_channel != NULL) { @@ -1267,16 +1283,21 @@ send_msg_via_ipc(xmlNode * msg, const char *sys) pcmk__ipc_send_xml(client_channel, 0, msg, crm_ipc_server_event); } else if (pcmk__str_eq(sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { - xmlNode *data = get_message_xml(msg, F_CRM_DATA); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, + NULL); + xmlNode *data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); process_te_message(msg, data); } else if (pcmk__str_eq(sys, CRM_SYSTEM_LRMD, pcmk__str_none)) { fsa_data_t fsa_data; ha_msg_input_t fsa_input; + xmlNode *wrapper = NULL; fsa_input.msg = msg; - fsa_input.xml = get_message_xml(msg, F_CRM_DATA); + + wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); + fsa_input.xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); fsa_data.id = 0; fsa_data.actions = 0; @@ -1323,14 +1344,15 @@ broadcast_remote_state_message(const char *node_name, bool node_up) crm_info("Notifying cluster of Pacemaker Remote node %s %s", node_name, node_up? "coming up" : "going down"); - crm_xml_add(msg, XML_ATTR_ID, node_name); + crm_xml_add(msg, PCMK_XA_ID, node_name); pcmk__xe_set_bool_attr(msg, PCMK__XA_IN_CCM, node_up); if (node_up) { - crm_xml_add(msg, PCMK__XA_CONN_HOST, controld_globals.our_nodename); + crm_xml_add(msg, PCMK__XA_CONNECTION_HOST, + controld_globals.our_nodename); } - send_cluster_message(NULL, crm_msg_crmd, msg, TRUE); + pcmk__cluster_send_message(NULL, crm_msg_crmd, msg); free_xml(msg); } diff --git a/daemons/controld/controld_metadata.c b/daemons/controld/controld_metadata.c index c813ceb..37df38c 100644 --- a/daemons/controld/controld_metadata.c +++ b/daemons/controld/controld_metadata.c @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the Pacemaker project contributors + * Copyright 2017-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -70,25 +70,18 @@ metadata_cache_reset(GHashTable *mdc) static struct ra_param_s * ra_param_from_xml(xmlNode *param_xml) { - const char *param_name = crm_element_value(param_xml, "name"); + const char *param_name = crm_element_value(param_xml, PCMK_XA_NAME); struct ra_param_s *p; - p = calloc(1, sizeof(struct ra_param_s)); - if (p == NULL) { - return NULL; - } + p = pcmk__assert_alloc(1, sizeof(struct ra_param_s)); - p->rap_name = strdup(param_name); - if (p->rap_name == NULL) { - free(p); - return NULL; - } + p->rap_name = pcmk__str_copy(param_name); - if (pcmk__xe_attr_is_true(param_xml, "reloadable")) { + if (pcmk__xe_attr_is_true(param_xml, PCMK_XA_RELOADABLE)) { controld_set_ra_param_flags(p, ra_param_reloadable); } - if (pcmk__xe_attr_is_true(param_xml, "unique")) { + if (pcmk__xe_attr_is_true(param_xml, PCMK_XA_UNIQUE)) { controld_set_ra_param_flags(p, ra_param_unique); } @@ -139,21 +132,19 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc, goto err; } - metadata = string2xml(metadata_str); + metadata = pcmk__xml_parse(metadata_str); if (!metadata) { reason = "Metadata is not valid XML"; goto err; } - md = calloc(1, sizeof(struct ra_metadata_s)); - if (md == NULL) { - reason = "Could not allocate memory"; - goto err; - } + md = pcmk__assert_alloc(1, sizeof(struct ra_metadata_s)); if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_OCF) == 0) { xmlChar *content = NULL; - xmlNode *version_element = first_named_child(metadata, "version"); + xmlNode *version_element = pcmk__xe_first_child(metadata, + PCMK_XE_VERSION, NULL, + NULL); if (version_element != NULL) { content = xmlNodeGetContent(version_element); @@ -166,11 +157,11 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc, } // Check supported actions - match = first_named_child(metadata, "actions"); - for (match = first_named_child(match, "action"); match != NULL; - match = crm_next_same_xml(match)) { + match = pcmk__xe_first_child(metadata, PCMK_XE_ACTIONS, NULL, NULL); + for (match = pcmk__xe_first_child(match, PCMK_XE_ACTION, NULL, NULL); + match != NULL; match = pcmk__xe_next_same(match)) { - const char *action_name = crm_element_value(match, "name"); + const char *action_name = crm_element_value(match, PCMK_XA_NAME); if (pcmk__str_eq(action_name, PCMK_ACTION_RELOAD_AGENT, pcmk__str_none)) { @@ -188,15 +179,15 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc, } // Build a parameter list - match = first_named_child(metadata, "parameters"); - for (match = first_named_child(match, "parameter"); match != NULL; - match = crm_next_same_xml(match)) { + match = pcmk__xe_first_child(metadata, PCMK_XE_PARAMETERS, NULL, NULL); + for (match = pcmk__xe_first_child(match, PCMK_XE_PARAMETER, NULL, NULL); + match != NULL; match = pcmk__xe_next_same(match)) { - const char *param_name = crm_element_value(match, "name"); + const char *param_name = crm_element_value(match, PCMK_XA_NAME); if (param_name == NULL) { - crm_warn("Metadata for %s:%s:%s has parameter without a name", - rsc->standard, rsc->provider, rsc->type); + crm_warn("Metadata for %s:%s:%s has parameter without a " + PCMK_XA_NAME, rsc->standard, rsc->provider, rsc->type); } else { struct ra_param_s *p = ra_param_from_xml(match); diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c index d692ef6..4bbf80c 100644 --- a/daemons/controld/controld_remote_ra.c +++ b/daemons/controld/controld_remote_ra.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2023 the Pacemaker project contributors + * Copyright 
2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include @@ -206,7 +206,8 @@ should_purge_attributes(crm_node_t *node) /* Get the node that was hosting the remote connection resource from the * peer cache. That's the one we really care about here. */ - conn_node = crm_get_peer(0, node->conn_host); + conn_node = pcmk__get_node(0, node->conn_host, NULL, + pcmk__node_search_cluster_member); if (conn_node == NULL) { return purge; } @@ -296,7 +297,7 @@ remote_node_up(const char *node_name) update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); /* Ensure node is in the remote peer cache with member status */ - node = crm_remote_peer_get(node_name); + node = pcmk__cluster_lookup_remote_node(node_name); CRM_CHECK(node != NULL, return); purge_remote_node_attrs(call_opt, node); @@ -324,24 +325,24 @@ remote_node_up(const char *node_name) */ broadcast_remote_state_message(node_name, true); - update = create_xml_node(NULL, XML_CIB_TAG_STATUS); + update = pcmk__xe_create(NULL, PCMK_XE_STATUS); state = create_node_state_update(node, node_update_cluster, update, __func__); - /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever + /* Clear the PCMK__XA_NODE_FENCED flag in the node state. If the node ever * needs to be fenced, this flag will allow various actions to determine * whether the fencing has happened yet. */ - crm_xml_add(state, XML_NODE_IS_FENCED, "0"); + crm_xml_add(state, PCMK__XA_NODE_FENCED, "0"); /* TODO: If the remote connection drops, and this (async) CIB update either * failed or has not yet completed, later actions could mistakenly think the - * node has already been fenced (if the XML_NODE_IS_FENCED attribute was + * node has already been fenced (if the PCMK__XA_NODE_FENCED attribute was * previously set, because it won't have been cleared). This could prevent * actual fencing or allow recurring monitor failures to be cleared too * soon. Ideally, we wouldn't rely on the CIB for the fenced status. */ - controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL); + controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL); free_xml(update); } @@ -379,7 +380,7 @@ remote_node_down(const char *node_name, const enum down_opts opts) } /* Ensure node is in the remote peer cache with lost state */ - node = crm_remote_peer_get(node_name); + node = pcmk__cluster_lookup_remote_node(node_name); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0); @@ -387,9 +388,9 @@ remote_node_down(const char *node_name, const enum down_opts opts) broadcast_remote_state_message(node_name, false); /* Update CIB node state */ - update = create_xml_node(NULL, XML_CIB_TAG_STATUS); + update = pcmk__xe_create(NULL, PCMK_XE_STATUS); create_node_state_update(node, node_update_cluster, update, __func__); - controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL); + controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL); free_xml(update); } @@ -419,7 +420,7 @@ check_remote_node_state(const remote_ra_cmd_t *cmd) * it hasn't been tracking the remote node, and other code relies on * the cache to distinguish remote nodes from unseen cluster nodes. 
*/ - crm_node_t *node = crm_remote_peer_get(cmd->rsc_id); + crm_node_t *node = pcmk__cluster_lookup_remote_node(cmd->rsc_id); CRM_CHECK(node != NULL, return); pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0); @@ -437,7 +438,7 @@ check_remote_node_state(const remote_ra_cmd_t *cmd) * so if the connection migrated elsewhere and we aren't DC, * un-cache the node, so we don't have stale info */ - crm_remote_peer_cache_remove(cmd->rsc_id); + pcmk__cluster_forget_remote_node(cmd->rsc_id); } } } @@ -493,7 +494,7 @@ report_remote_ra_result(remote_ra_cmd_t * cmd) op.params = pcmk__strkey_table(free, free); for (tmp = cmd->params; tmp; tmp = tmp->next) { - g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value)); + pcmk__insert_dup(op.params, tmp->key, tmp->value); } } @@ -861,12 +862,17 @@ handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeo int rc = pcmk_rc_ok; for (tmp = cmd->params; tmp; tmp = tmp->next) { - if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR, - XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) { + if (pcmk__strcase_any_of(tmp->key, + PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_SERVER, + NULL)) { server = tmp->value; - } else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) { + + } else if (pcmk__str_eq(tmp->key, PCMK_REMOTE_RA_PORT, + pcmk__str_none)) { port = atoi(tmp->value); - } else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) { + + } else if (pcmk__str_eq(tmp->key, CRM_META "_" PCMK__META_CONTAINER, + pcmk__str_none)) { lrm_remote_set_flags(lrm_state, controlling_guest); } } @@ -967,9 +973,9 @@ handle_remote_ra_exec(gpointer user_data) } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, NULL)) { - /* Currently the only reloadable parameter is reconnect_interval, - * which is only used by the scheduler via the CIB, so reloads are a - * no-op. + /* Currently the only reloadable parameter is + * PCMK_REMOTE_RA_RECONNECT_INTERVAL, which is only used by the + * scheduler via the CIB, so reloads are a no-op. 
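/* [Editorial sketch -- not part of the upstream patch]
 * report_remote_ra_result() above swaps the manual
 * strdup()/g_hash_table_insert() pair for pcmk__insert_dup(), which
 * duplicates both key and value itself. A minimal sketch of the same loop
 * (the helper name copy_params is hypothetical; the lrmd_key_value_t fields
 * are those used in the hunk):
 */
static void
copy_params(GHashTable *dest, const lrmd_key_value_t *src)
{
    for (const lrmd_key_value_t *kv = src; kv != NULL; kv = kv->next) {
        // Inserts copies, so the source list can be freed independently
        pcmk__insert_dup(dest, kv->key, kv->value);
    }
}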
* * @COMPAT DC <2.1.0: We only need to check for "reload" in case * we're in a rolling upgrade with a DC scheduling "reload" instead @@ -995,7 +1001,7 @@ remote_ra_data_init(lrm_state_t * lrm_state) return; } - ra_data = calloc(1, sizeof(remote_ra_data_t)); + ra_data = pcmk__assert_alloc(1, sizeof(remote_ra_data_t)); ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state); lrm_state->remote_ra_data = ra_data; } @@ -1041,12 +1047,12 @@ remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id) lrmd_rsc_info_t *info = NULL; if ((lrm_state_find(rsc_id))) { - info = calloc(1, sizeof(lrmd_rsc_info_t)); + info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t)); - info->id = strdup(rsc_id); - info->type = strdup(REMOTE_LRMD_RA); - info->standard = strdup(PCMK_RESOURCE_CLASS_OCF); - info->provider = strdup("pacemaker"); + info->id = pcmk__str_copy(rsc_id); + info->type = pcmk__str_copy(REMOTE_LRMD_RA); + info->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_OCF); + info->provider = pcmk__str_copy("pacemaker"); } return info; @@ -1202,7 +1208,7 @@ handle_dup: /* update the userdata */ if (userdata) { free(cmd->userdata); - cmd->userdata = strdup(userdata); + cmd->userdata = pcmk__str_copy(userdata); } /* if we've already reported success, generate a new call id */ @@ -1280,23 +1286,12 @@ controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id, return pcmk_rc_ok; } - cmd = calloc(1, sizeof(remote_ra_cmd_t)); - if (cmd == NULL) { - lrmd_key_value_freeall(params); - return ENOMEM; - } - - cmd->owner = strdup(lrm_state->node_name); - cmd->rsc_id = strdup(rsc_id); - cmd->action = strdup(action); - cmd->userdata = strdup(userdata); - if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL) - || (cmd->userdata == NULL)) { - free_cmd(cmd); - lrmd_key_value_freeall(params); - return ENOMEM; - } + cmd = pcmk__assert_alloc(1, sizeof(remote_ra_cmd_t)); + cmd->owner = pcmk__str_copy(lrm_state->node_name); + cmd->rsc_id = pcmk__str_copy(rsc_id); + cmd->action = pcmk__str_copy(action); + cmd->userdata = pcmk__str_copy(userdata); cmd->interval_ms = interval_ms; cmd->timeout = timeout_ms; cmd->start_delay = start_delay_ms; @@ -1347,9 +1342,8 @@ remote_ra_fail(const char *node_name) * * */ -#define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \ - "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \ - "/" XML_CIB_TAG_NODE +#define XPATH_PSEUDO_FENCE "/" PCMK__XE_PSEUDO_EVENT \ + "[@" PCMK_XA_OPERATION "='stonith']/" PCMK__XE_DOWNED "/" PCMK_XE_NODE /*! * \internal @@ -1380,7 +1374,7 @@ remote_ra_process_pseudo(xmlNode *xml) * recovered. */ if (result) { - const char *remote = ID(result); + const char *remote = pcmk__xe_id(result); if (remote) { remote_node_down(remote, DOWN_ERASE_LRM); @@ -1398,13 +1392,13 @@ remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) crm_node_t *node; call_opt = crmd_cib_smart_opt(); - node = crm_remote_peer_get(lrm_state->node_name); + node = pcmk__cluster_lookup_remote_node(lrm_state->node_name); CRM_CHECK(node != NULL, return); - update = create_xml_node(NULL, XML_CIB_TAG_STATUS); + update = pcmk__xe_create(NULL, PCMK_XE_STATUS); state = create_node_state_update(node, node_update_none, update, __func__); - crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0"); - if (controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, + crm_xml_add(state, PCMK__XA_NODE_IN_MAINTENANCE, (maintenance? 
"1" : "0")); + if (controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL) == pcmk_rc_ok) { /* TODO: still not 100% sure that async update will succeed ... */ if (maintenance) { @@ -1416,9 +1410,9 @@ remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) free_xml(update); } -#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \ - "[@" XML_LRM_ATTR_TASK "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \ - XML_GRAPH_TAG_MAINTENANCE +#define XPATH_PSEUDO_MAINTENANCE "//" PCMK__XE_PSEUDO_EVENT \ + "[@" PCMK_XA_OPERATION "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \ + PCMK__XE_MAINTENANCE /*! * \internal @@ -1435,25 +1429,29 @@ remote_ra_process_maintenance_nodes(xmlNode *xml) xmlNode *node; int cnt = 0, cnt_remote = 0; - for (node = first_named_child(getXpathResult(search, 0), - XML_CIB_TAG_NODE); - node != NULL; node = crm_next_same_xml(node)) { + for (node = pcmk__xe_first_child(getXpathResult(search, 0), + PCMK_XE_NODE, NULL, NULL); + node != NULL; node = pcmk__xe_next_same(node)) { - lrm_state_t *lrm_state = lrm_state_find(ID(node)); + lrm_state_t *lrm_state = lrm_state_find(pcmk__xe_id(node)); cnt++; if (lrm_state && lrm_state->remote_ra_data && pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) { - int is_maint; + + const char *in_maint_s = NULL; + int in_maint; cnt_remote++; - pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE), - &is_maint, 0); - remote_ra_maintenance(lrm_state, is_maint); + in_maint_s = crm_element_value(node, + PCMK__XA_NODE_IN_MAINTENANCE); + pcmk__scan_min_int(in_maint_s, &in_maint, 0); + remote_ra_maintenance(lrm_state, in_maint); } } - crm_trace("Action holds %d nodes (%d remotes found) " - "adjusting maintenance-mode", cnt, cnt_remote); + crm_trace("Action holds %d nodes (%d remotes found) adjusting " + PCMK_OPT_MAINTENANCE_MODE, + cnt, cnt_remote); } freeXpathObject(search); } diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c index 8aca83f..3081304 100644 --- a/daemons/controld/controld_schedulerd.c +++ b/daemons/controld/controld_schedulerd.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -65,7 +64,7 @@ save_cib_contents(xmlNode *msg, int call_id, int rc, xmlNode *output, if (rc == pcmk_ok) { char *filename = crm_strdup_printf(PE_STATE_DIR "/pe-core-%s.bz2", id); - if (write_xml_file(output, filename, TRUE) < 0) { + if (pcmk__xml_write_file(output, filename, true, NULL) != pcmk_rc_ok) { crm_err("Could not save Cluster Information Base to %s after scheduler crash", filename); } else { @@ -144,12 +143,13 @@ handle_reply(pcmk_schedulerd_api_reply_t *reply) * * The name of the top level element here is irrelevant. Nothing checks it. 
*/ - fsa_input.msg = create_xml_node(NULL, "dummy-reply"); - crm_xml_add(fsa_input.msg, XML_ATTR_REFERENCE, msg_ref); - crm_xml_add(fsa_input.msg, F_CRM_TGRAPH_INPUT, reply->data.graph.input); + fsa_input.msg = pcmk__xe_create(NULL, "dummy-reply"); + crm_xml_add(fsa_input.msg, PCMK_XA_REFERENCE, msg_ref); + crm_xml_add(fsa_input.msg, PCMK__XA_CRM_TGRAPH_IN, + reply->data.graph.input); - crm_data_node = create_xml_node(fsa_input.msg, F_CRM_DATA); - add_node_copy(crm_data_node, reply->data.graph.tgraph); + crm_data_node = pcmk__xe_create(fsa_input.msg, PCMK__XE_CRM_XML); + pcmk__xml_copy(crm_data_node, reply->data.graph.tgraph); register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input); free_xml(fsa_input.msg); @@ -378,14 +378,14 @@ force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value) char *xpath_string = NULL; xmlXPathObjectPtr xpathObj = NULL; - xpath_base = pcmk_cib_xpath_for(XML_CIB_TAG_CRMCONFIG); + xpath_base = pcmk_cib_xpath_for(PCMK_XE_CRM_CONFIG); if (xpath_base == NULL) { - crm_err(XML_CIB_TAG_CRMCONFIG " CIB element not known (bug?)"); + crm_err(PCMK_XE_CRM_CONFIG " CIB element not known (bug?)"); return; } xpath_string = crm_strdup_printf("%s//%s//nvpair[@name='%s']", - xpath_base, XML_CIB_TAG_PROPSET, + xpath_base, PCMK_XE_CLUSTER_PROPERTY_SET, attr_name); xpathObj = xpath_search(xml, xpath_string); max = numXpathResults(xpathObj); @@ -393,8 +393,9 @@ force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value) for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); - crm_trace("Forcing %s/%s = %s", ID(match), attr_name, attr_value); - crm_xml_add(match, XML_NVPAIR_ATTR_VALUE, attr_value); + crm_trace("Forcing %s/%s = %s", + pcmk__xe_id(match), attr_name, attr_value); + crm_xml_add(match, PCMK_XA_VALUE, attr_value); } if(max == 0) { @@ -403,32 +404,37 @@ force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value) xmlNode *cluster_property_set = NULL; crm_trace("Creating %s-%s for %s=%s", - CIB_OPTIONS_FIRST, attr_name, attr_name, attr_value); + PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name, attr_name, + attr_value); - configuration = pcmk__xe_match(xml, XML_CIB_TAG_CONFIGURATION, NULL, - NULL); + configuration = pcmk__xe_first_child(xml, PCMK_XE_CONFIGURATION, NULL, + NULL); if (configuration == NULL) { - configuration = create_xml_node(xml, XML_CIB_TAG_CONFIGURATION); + configuration = pcmk__xe_create(xml, PCMK_XE_CONFIGURATION); } - crm_config = pcmk__xe_match(configuration, XML_CIB_TAG_CRMCONFIG, NULL, - NULL); + crm_config = pcmk__xe_first_child(configuration, PCMK_XE_CRM_CONFIG, + NULL, NULL); if (crm_config == NULL) { - crm_config = create_xml_node(configuration, XML_CIB_TAG_CRMCONFIG); + crm_config = pcmk__xe_create(configuration, PCMK_XE_CRM_CONFIG); } - cluster_property_set = pcmk__xe_match(crm_config, XML_CIB_TAG_PROPSET, - NULL, NULL); + cluster_property_set = + pcmk__xe_first_child(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET, NULL, + NULL); if (cluster_property_set == NULL) { - cluster_property_set = create_xml_node(crm_config, XML_CIB_TAG_PROPSET); - crm_xml_add(cluster_property_set, XML_ATTR_ID, CIB_OPTIONS_FIRST); + cluster_property_set = + pcmk__xe_create(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET); + crm_xml_add(cluster_property_set, PCMK_XA_ID, + PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS); } - xml = create_xml_node(cluster_property_set, XML_CIB_TAG_NVPAIR); + xml = pcmk__xe_create(cluster_property_set, PCMK_XE_NVPAIR); - crm_xml_set_id(xml, "%s-%s", CIB_OPTIONS_FIRST, 
attr_name); - crm_xml_add(xml, XML_NVPAIR_ATTR_NAME, attr_name); - crm_xml_add(xml, XML_NVPAIR_ATTR_VALUE, attr_value); + crm_xml_set_id(xml, "%s-%s", + PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name); + crm_xml_add(xml, PCMK_XA_NAME, attr_name); + crm_xml_add(xml, PCMK_XA_VALUE, attr_value); } freeXpathObject(xpathObj); } @@ -476,16 +482,16 @@ do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * scheduler is invoked */ pcmk__refresh_node_caches_from_cib(output); - crm_xml_add(output, XML_ATTR_DC_UUID, controld_globals.our_uuid); - pcmk__xe_set_bool_attr(output, XML_ATTR_HAVE_QUORUM, + crm_xml_add(output, PCMK_XA_DC_UUID, controld_globals.our_uuid); + pcmk__xe_set_bool_attr(output, PCMK_XA_HAVE_QUORUM, pcmk_is_set(controld_globals.flags, controld_has_quorum)); - force_local_option(output, XML_ATTR_HAVE_WATCHDOG, pcmk__btoa(watchdog)); + force_local_option(output, PCMK_OPT_HAVE_WATCHDOG, pcmk__btoa(watchdog)); if (pcmk_is_set(controld_globals.flags, controld_ever_had_quorum) && !crm_have_quorum) { - crm_xml_add_int(output, XML_ATTR_QUORUM_PANIC, 1); + crm_xml_add_int(output, PCMK_XA_NO_QUORUM_PANIC, 1); } rc = pcmk_rc2legacy(pcmk_schedulerd_api_graph(schedulerd_api, output, &ref)); @@ -498,8 +504,8 @@ do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void CRM_ASSERT(ref != NULL); controld_expect_sched_reply(ref); crm_debug("Invoking the scheduler: query=%d, ref=%s, seq=%llu, " - "quorate=%s", fsa_pe_query, controld_globals.fsa_pe_ref, - crm_peer_seq, pcmk__btoa(pcmk_is_set(controld_globals.flags, - controld_has_quorum))); + "quorate=%s", + fsa_pe_query, controld_globals.fsa_pe_ref, crm_peer_seq, + pcmk__flag_text(controld_globals.flags, controld_has_quorum)); } } diff --git a/daemons/controld/controld_te_actions.c b/daemons/controld/controld_te_actions.c index fe6b744..a4c99fc 100644 --- a/daemons/controld/controld_te_actions.c +++ b/daemons/controld/controld_te_actions.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -13,7 +13,6 @@ #include #include #include // lrmd_event_data_t, lrmd_free_event() -#include #include #include @@ -44,7 +43,7 @@ te_start_action_timer(const pcmk__graph_t *graph, pcmk__graph_action_t *action) static int execute_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *pseudo) { - const char *task = crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK); + const char *task = crm_element_value(pseudo->xml, PCMK_XA_OPERATION); /* send to peers as well? 
*/ if (pcmk__str_eq(task, PCMK_ACTION_MAINTENANCE_NODES, pcmk__str_casei)) { @@ -62,7 +61,7 @@ execute_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *pseudo) cmd = create_request(task, pseudo->xml, node->uname, CRM_SYSTEM_CRMD, CRM_SYSTEM_TENGINE, NULL); - send_cluster_message(node, crm_msg_crmd, cmd, FALSE); + pcmk__cluster_send_message(node, crm_msg_crmd, cmd); free_xml(cmd); } @@ -73,7 +72,7 @@ execute_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *pseudo) } crm_debug("Pseudo-action %d (%s) fired and confirmed", pseudo->id, - crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK_KEY)); + crm_element_value(pseudo->xml, PCMK__XA_OPERATION_KEY)); te_action_confirmed(pseudo, graph); return pcmk_rc_ok; } @@ -83,7 +82,7 @@ get_target_rc(pcmk__graph_action_t *action) { int exit_status; - pcmk__scan_min_int(crm_meta_value(action->params, XML_ATTR_TE_TARGET_RC), + pcmk__scan_min_int(crm_meta_value(action->params, PCMK__META_OP_TARGET_RC), &exit_status, 0); return exit_status; } @@ -113,22 +112,24 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) gboolean rc = TRUE; gboolean no_wait = FALSE; - id = ID(action->xml); + const crm_node_t *node = NULL; + + id = pcmk__xe_id(action->xml); CRM_CHECK(!pcmk__str_empty(id), return EPROTO); - task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); + task = crm_element_value(action->xml, PCMK_XA_OPERATION); CRM_CHECK(!pcmk__str_empty(task), return EPROTO); - on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + on_node = crm_element_value(action->xml, PCMK__META_ON_NODE); CRM_CHECK(!pcmk__str_empty(on_node), return pcmk_rc_node_unknown); - router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + router_node = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (router_node == NULL) { router_node = on_node; if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { const char *mode = crm_element_value(action->xml, PCMK__XA_MODE); - if (pcmk__str_eq(mode, XML_TAG_CIB, pcmk__str_none)) { + if (pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) { router_node = controld_globals.our_nodename; } } @@ -139,7 +140,7 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) is_local = TRUE; } - value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT); + value = crm_meta_value(action->params, PCMK__META_OP_NO_WAIT); if (crm_is_true(value)) { no_wait = TRUE; } @@ -158,7 +159,8 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) return pcmk_rc_ok; } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) { - crm_node_t *peer = crm_get_peer(0, router_node); + crm_node_t *peer = pcmk__get_node(0, router_node, NULL, + pcmk__node_search_cluster_member); pcmk__update_peer_expected(__func__, peer, CRMD_JOINSTATE_DOWN); } @@ -168,9 +170,11 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) counter = pcmk__transition_key(controld_globals.transition_graph->id, action->id, get_target_rc(action), controld_globals.te_uuid); - crm_xml_add(cmd, XML_ATTR_TRANSITION_KEY, counter); + crm_xml_add(cmd, PCMK__XA_TRANSITION_KEY, counter); - rc = send_cluster_message(crm_get_peer(0, router_node), crm_msg_crmd, cmd, TRUE); + node = pcmk__get_node(0, router_node, NULL, + pcmk__node_search_cluster_member); + rc = pcmk__cluster_send_message(node, crm_msg_crmd, cmd); free(counter); free_xml(cmd); @@ -213,16 +217,17 @@ static lrmd_event_data_t * synthesize_timeout_event(const pcmk__graph_action_t *action, int target_rc) { 
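/* [Editorial note -- not part of the upstream patch] get_target_rc() above
 * shows the parsing idiom the patch favors: pcmk__scan_min_int() rather than
 * a bare atoi(), so a NULL or unparsable meta-attribute ends up at the
 * caller-supplied floor (0 in that hunk) instead of invoking undefined
 * behavior. For example (sketch; params stands in for an action's meta
 * table, as in the surrounding code):
 *
 *     int exit_status = 0;
 *
 *     pcmk__scan_min_int(crm_meta_value(params, PCMK__META_OP_TARGET_RC),
 *                        &exit_status, 0);
 */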
lrmd_event_data_t *op = NULL; - const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); const char *reason = NULL; char *dynamic_reason = NULL; - if (pcmk__str_eq(target, get_local_node_name(), pcmk__str_casei)) { + if (pcmk__str_eq(target, pcmk__cluster_local_node_name(), + pcmk__str_casei)) { reason = "Local executor did not return result in time"; } else { const char *router_node = NULL; - router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + router_node = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (router_node == NULL) { router_node = target; } @@ -254,18 +259,21 @@ controld_record_action_event(pcmk__graph_action_t *action, int rc = pcmk_ok; const char *rsc_id = NULL; - const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); - const char *task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); - const char *target_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); + const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); + const char *task_uuid = crm_element_value(action->xml, + PCMK__XA_OPERATION_KEY); + const char *target_uuid = crm_element_value(action->xml, + PCMK__META_ON_NODE_UUID); int target_rc = get_target_rc(action); - action_rsc = find_xml_node(action->xml, XML_CIB_TAG_RESOURCE, TRUE); + action_rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, + NULL); if (action_rsc == NULL) { return; } - rsc_id = ID(action_rsc); + rsc_id = pcmk__xe_id(action_rsc); CRM_CHECK(rsc_id != NULL, crm_log_xml_err(action->xml, "Bad:action"); return); @@ -278,27 +286,27 @@ controld_record_action_event(pcmk__graph_action_t *action, */ - state = create_xml_node(NULL, XML_CIB_TAG_STATE); + state = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); - crm_xml_add(state, XML_ATTR_ID, target_uuid); - crm_xml_add(state, XML_ATTR_UNAME, target); + crm_xml_add(state, PCMK_XA_ID, target_uuid); + crm_xml_add(state, PCMK_XA_UNAME, target); - rsc = create_xml_node(state, XML_CIB_TAG_LRM); - crm_xml_add(rsc, XML_ATTR_ID, target_uuid); + rsc = pcmk__xe_create(state, PCMK__XE_LRM); + crm_xml_add(rsc, PCMK_XA_ID, target_uuid); - rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCES); - rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCE); - crm_xml_add(rsc, XML_ATTR_ID, rsc_id); + rsc = pcmk__xe_create(rsc, PCMK__XE_LRM_RESOURCES); + rsc = pcmk__xe_create(rsc, PCMK__XE_LRM_RESOURCE); + crm_xml_add(rsc, PCMK_XA_ID, rsc_id); - crm_copy_xml_element(action_rsc, rsc, XML_ATTR_TYPE); - crm_copy_xml_element(action_rsc, rsc, XML_AGENT_ATTR_CLASS); - crm_copy_xml_element(action_rsc, rsc, XML_AGENT_ATTR_PROVIDER); + crm_copy_xml_element(action_rsc, rsc, PCMK_XA_TYPE); + crm_copy_xml_element(action_rsc, rsc, PCMK_XA_CLASS); + crm_copy_xml_element(action_rsc, rsc, PCMK_XA_PROVIDER); pcmk__create_history_xml(rsc, op, CRM_FEATURE_SET, target_rc, target, __func__); - rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_STATUS, state, + rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_STATUS, state, cib_scope_local); fsa_register_cib_callback(rc, NULL, cib_action_updated); free_xml(state); @@ -313,8 +321,9 @@ controld_record_action_timeout(pcmk__graph_action_t *action) { lrmd_event_data_t *op = NULL; - const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); - const char *task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); + const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); + const char *task_uuid = 
crm_element_value(action->xml, + PCMK__XA_OPERATION_KEY); int target_rc = get_target_rc(action); @@ -362,17 +371,17 @@ execute_rsc_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) CRM_ASSERT(action->xml != NULL); pcmk__clear_graph_action_flags(action, pcmk__graph_action_executed); - on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + on_node = crm_element_value(action->xml, PCMK__META_ON_NODE); CRM_CHECK(!pcmk__str_empty(on_node), crm_err("Corrupted command(id=%s) %s: no node", - ID(action->xml), pcmk__s(task, "without task")); + pcmk__xe_id(action->xml), pcmk__s(task, "without task")); return pcmk_rc_node_unknown); rsc_op = action->xml; - task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); - task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); - router_node = crm_element_value(rsc_op, XML_LRM_ATTR_ROUTER_NODE); + task = crm_element_value(rsc_op, PCMK_XA_OPERATION); + task_uuid = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY); + router_node = crm_element_value(rsc_op, PCMK__XA_ROUTER_NODE); if (!router_node) { router_node = on_node; @@ -381,14 +390,14 @@ execute_rsc_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) counter = pcmk__transition_key(controld_globals.transition_graph->id, action->id, get_target_rc(action), controld_globals.te_uuid); - crm_xml_add(rsc_op, XML_ATTR_TRANSITION_KEY, counter); + crm_xml_add(rsc_op, PCMK__XA_TRANSITION_KEY, counter); if (pcmk__str_eq(router_node, controld_globals.our_nodename, pcmk__str_casei)) { is_local = TRUE; } - value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT); + value = crm_meta_value(action->params, PCMK__META_OP_NO_WAIT); if (crm_is_true(value)) { no_wait = TRUE; } @@ -421,7 +430,11 @@ execute_rsc_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) I_NULL, &msg); } else { - rc = send_cluster_message(crm_get_peer(0, router_node), crm_msg_lrmd, cmd, TRUE); + const crm_node_t *node = + pcmk__get_node(0, router_node, NULL, + pcmk__node_search_cluster_member); + + rc = pcmk__cluster_send_message(node, crm_msg_lrmd, cmd); } free(counter); @@ -500,8 +513,8 @@ te_update_job_count_on(const char *target, int offset, bool migrate) r = g_hash_table_lookup(te_targets, target); if(r == NULL) { - r = calloc(1, sizeof(struct te_peer_s)); - r->name = strdup(target); + r = pcmk__assert_alloc(1, sizeof(struct te_peer_s)); + r->name = pcmk__str_copy(target); g_hash_table_insert(te_targets, r->name, r); } @@ -515,8 +528,8 @@ te_update_job_count_on(const char *target, int offset, bool migrate) static void te_update_job_count(pcmk__graph_action_t *action, int offset) { - const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION); + const char *target = crm_element_value(action->xml, PCMK__META_ON_NODE); if ((action->type != pcmk__rsc_graph_action) || (target == NULL)) { /* No limit on these */ @@ -527,19 +540,22 @@ te_update_job_count(pcmk__graph_action_t *action, int offset) * on a remote node. 
For now, we count all actions occurring on a * remote node against the job list on the cluster node hosting * the connection resources */ - target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + target = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if ((target == NULL) && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL)) { - const char *t1 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE); - const char *t2 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET); + + const char *t1 = crm_meta_value(action->params, + PCMK__META_MIGRATE_SOURCE); + const char *t2 = crm_meta_value(action->params, + PCMK__META_MIGRATE_TARGET); te_update_job_count_on(t1, offset, TRUE); te_update_job_count_on(t2, offset, TRUE); return; } else if (target == NULL) { - target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + target = crm_element_value(action->xml, PCMK__META_ON_NODE); } te_update_job_count_on(target, offset, FALSE); @@ -561,8 +577,8 @@ allowed_on_node(const pcmk__graph_t *graph, const pcmk__graph_action_t *action, { int limit = 0; struct te_peer_s *r = NULL; - const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - const char *id = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); + const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION); + const char *id = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY); if(target == NULL) { /* No limit on these */ @@ -576,8 +592,8 @@ allowed_on_node(const pcmk__graph_t *graph, const pcmk__graph_action_t *action, limit = throttle_get_job_limit(target); if(r == NULL) { - r = calloc(1, sizeof(struct te_peer_s)); - r->name = strdup(target); + r = pcmk__assert_alloc(1, sizeof(struct te_peer_s)); + r->name = pcmk__str_copy(target); g_hash_table_insert(te_targets, r->name, r); } @@ -613,7 +629,7 @@ static bool graph_action_allowed(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *target = NULL; - const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); + const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (action->type != pcmk__rsc_graph_action) { /* No limit on these */ @@ -624,20 +640,20 @@ graph_action_allowed(pcmk__graph_t *graph, pcmk__graph_action_t *action) * on a remote node. 
For now, we count all actions occurring on a * remote node against the job list on the cluster node hosting * the connection resources */ - target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + target = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if ((target == NULL) && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL)) { - target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE); + target = crm_meta_value(action->params, PCMK__META_MIGRATE_SOURCE); if (!allowed_on_node(graph, action, target)) { return false; } - target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET); + target = crm_meta_value(action->params, PCMK__META_MIGRATE_TARGET); } else if (target == NULL) { - target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); + target = crm_element_value(action->xml, PCMK__META_ON_NODE); } return allowed_on_node(graph, action, target); @@ -654,7 +670,7 @@ te_action_confirmed(pcmk__graph_action_t *action, pcmk__graph_t *graph) { if (!pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { if ((action->type == pcmk__rsc_graph_action) - && (crm_element_value(action->xml, XML_LRM_ATTR_TARGET) != NULL)) { + && (crm_element_value(action->xml, PCMK__META_ON_NODE) != NULL)) { te_update_job_count(action, -1); } pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c index c26e757..901d44d 100644 --- a/daemons/controld/controld_te_callbacks.c +++ b/daemons/controld/controld_te_callbacks.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
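/* [Editorial sketch -- not part of the upstream patch]
 * te_update_job_count_on() and allowed_on_node() above both switch from
 * checked calloc()/strdup() to pcmk__assert_alloc()/pcmk__str_copy(), which
 * abort on allocation failure and therefore never return NULL, removing the
 * error-path cleanup. The lazy hash-table insert they share, as a
 * hypothetical helper:
 */
static struct te_peer_s *
lookup_or_create_peer(GHashTable *table, const char *target)
{
    struct te_peer_s *r = g_hash_table_lookup(table, target);

    if (r == NULL) {
        r = pcmk__assert_alloc(1, sizeof(struct te_peer_s));
        r->name = pcmk__str_copy(target);  // asserts instead of returning NULL
        g_hash_table_insert(table, r->name, r);
    }
    return r;
}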
* @@ -14,23 +14,21 @@ #include #include #include -#include -#include /* For ONLINESTATUS etc */ #include void te_update_confirm(const char *event, xmlNode * msg); -#define RSC_OP_PREFIX "//" XML_TAG_DIFF_ADDED "//" XML_TAG_CIB \ - "//" XML_LRM_TAG_RSC_OP "[@" XML_ATTR_ID "='" +#define RSC_OP_PREFIX "//" PCMK__XE_DIFF_ADDED "//" PCMK_XE_CIB \ + "//" PCMK__XE_LRM_RSC_OP "[@" PCMK_XA_ID "='" -// An explicit shutdown-lock of 0 means the lock has been cleared +// An explicit PCMK_OPT_SHUTDOWN_LOCK of 0 means the lock has been cleared static bool shutdown_lock_cleared(xmlNode *lrm_resource) { time_t shutdown_lock = 0; - return (crm_element_value_epoch(lrm_resource, XML_CONFIG_ATTR_SHUTDOWN_LOCK, + return (crm_element_value_epoch(lrm_resource, PCMK_OPT_SHUTDOWN_LOCK, &shutdown_lock) == pcmk_ok) && (shutdown_lock == 0); } @@ -49,19 +47,21 @@ te_update_diff_v1(const char *event, xmlNode *diff) "xml-patchset", diff); if (cib__config_changed_v1(NULL, NULL, &diff)) { - abort_transition(INFINITY, pcmk__graph_restart, "Non-status change", - diff); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Non-status change", diff); goto bail; /* configuration changed */ } /* Tickets Attributes - Added/Updated */ xpathObj = xpath_search(diff, - "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_TICKETS); + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_ADDED + "//" PCMK_XE_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Ticket attribute: update", aborted); goto bail; @@ -71,11 +71,13 @@ te_update_diff_v1(const char *event, xmlNode *diff) /* Tickets Attributes - Removed */ xpathObj = xpath_search(diff, - "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_TICKETS); + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_REMOVED + "//" PCMK_XE_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Ticket attribute: removal", aborted); goto bail; } @@ -84,23 +86,24 @@ te_update_diff_v1(const char *event, xmlNode *diff) /* Transient Attributes - Removed */ xpathObj = xpath_search(diff, - "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" - XML_TAG_TRANSIENT_NODEATTRS); + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_REMOVED + "//" PCMK__XE_TRANSIENT_ATTRIBUTES); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Transient attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); - // Check for lrm_resource entries + // Check for PCMK__XE_LRM_RESOURCE entries xpathObj = xpath_search(diff, - "//" F_CIB_UPDATE_RESULT - "//" XML_TAG_DIFF_ADDED - "//" XML_LRM_TAG_RESOURCE); + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_ADDED + "//" PCMK__XE_LRM_RESOURCE); max = numXpathResults(xpathObj); /* @@ -117,8 +120,8 @@ te_update_diff_v1(const char *event, xmlNode *diff) crm_debug("Ignoring resource operation updates due to history refresh of %d resources", max); crm_log_xml_trace(diff, "lrm-refresh"); - abort_transition(INFINITY, pcmk__graph_restart, "History refresh", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "History refresh", NULL); goto bail; } @@ -127,7 +130,7 @@ 
te_update_diff_v1(const char *event, xmlNode *diff) if (shutdown_lock_cleared(lrm_resource)) { // @TODO would be more efficient to abort once after transition done - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Shutdown lock cleared", lrm_resource); // Still process results, so we stop timers and update failcounts } @@ -137,7 +140,9 @@ te_update_diff_v1(const char *event, xmlNode *diff) /* Process operation updates */ xpathObj = xpath_search(diff, - "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_ADDED + "//" PCMK__XE_LRM_RSC_OP); max = numXpathResults(xpathObj); if (max > 0) { int lpc = 0; @@ -152,7 +157,9 @@ te_update_diff_v1(const char *event, xmlNode *diff) freeXpathObject(xpathObj); /* Detect deleted (as opposed to replaced or added) actions - eg. crm_resource -C */ - xpathObj = xpath_search(diff, "//" XML_TAG_DIFF_REMOVED "//" XML_LRM_TAG_RSC_OP); + xpathObj = xpath_search(diff, + "//" PCMK__XE_DIFF_REMOVED + "//" PCMK__XE_LRM_RSC_OP); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { const char *op_id = NULL; @@ -162,7 +169,7 @@ te_update_diff_v1(const char *event, xmlNode *diff) CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; - op_id = ID(match); + op_id = pcmk__xe_id(match); if (rsc_op_xpath == NULL) { rsc_op_xpath = g_string_new(RSC_OP_PREFIX); @@ -180,13 +187,14 @@ te_update_diff_v1(const char *event, xmlNode *diff) if (cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", (const char *) rsc_op_xpath->str, op_id, node); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Resource op removal", match); freeXpathObject(op_match); goto bail; } else { - crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", + crm_debug("Deleted " PCMK__XE_LRM_RSC_OP " %s on %s was for " + "graph event %d", op_id, node, cancelled->id); } } @@ -204,14 +212,14 @@ te_update_diff_v1(const char *event, xmlNode *diff) static void process_lrm_resource_diff(xmlNode *lrm_resource, const char *node) { - for (xmlNode *rsc_op = pcmk__xml_first_child(lrm_resource); rsc_op != NULL; - rsc_op = pcmk__xml_next(rsc_op)) { + for (xmlNode *rsc_op = pcmk__xe_first_child(lrm_resource, NULL, NULL, NULL); + rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { process_graph_event(rsc_op, node); } if (shutdown_lock_cleared(lrm_resource)) { // @TODO would be more efficient to abort once after transition done - abort_transition(INFINITY, pcmk__graph_restart, "Shutdown lock cleared", - lrm_resource); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Shutdown lock cleared", lrm_resource); } } @@ -225,12 +233,12 @@ process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, return; } - if (pcmk__xe_is(xml, XML_CIB_TAG_LRM)) { - xml = first_named_child(xml, XML_LRM_TAG_RESOURCES); + if (pcmk__xe_is(xml, PCMK__XE_LRM)) { + xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL); CRM_CHECK(xml != NULL, return); } - CRM_CHECK(pcmk__xe_is(xml, XML_LRM_TAG_RESOURCES), return); + CRM_CHECK(pcmk__xe_is(xml, PCMK__XE_LRM_RESOURCES), return); /* * Updates by, or in response to, TE actions will never contain updates @@ -248,31 +256,31 @@ process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, && (xml->children != NULL) && (xml->children->next != NULL)) { crm_log_xml_trace(change, "lrm-refresh"); - 
abort_transition(INFINITY, pcmk__graph_restart, "History refresh", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "History refresh", NULL); return; } - for (rsc = pcmk__xml_first_child(xml); rsc != NULL; - rsc = pcmk__xml_next(rsc)) { - crm_trace("Processing %s", ID(rsc)); + for (rsc = pcmk__xe_first_child(xml, NULL, NULL, NULL); rsc != NULL; + rsc = pcmk__xe_next(rsc)) { + crm_trace("Processing %s", pcmk__xe_id(rsc)); process_lrm_resource_diff(rsc, node); } } static char *extract_node_uuid(const char *xpath) { - char *mutable_path = strdup(xpath); + char *mutable_path = pcmk__str_copy(xpath); char *node_uuid = NULL; char *search = NULL; char *match = NULL; - match = strstr(mutable_path, "node_state[@" XML_ATTR_ID "=\'"); + match = strstr(mutable_path, PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "=\'"); if (match == NULL) { free(mutable_path); return NULL; } - match += strlen("node_state[@" XML_ATTR_ID "=\'"); + match += strlen(PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "=\'"); search = strchr(match, '\''); if (search == NULL) { @@ -281,7 +289,7 @@ static char *extract_node_uuid(const char *xpath) } search[0] = 0; - node_uuid = strdup(match); + node_uuid = pcmk__str_copy(match); free(mutable_path); return node_uuid; } @@ -293,22 +301,25 @@ abort_unless_down(const char *xpath, const char *op, xmlNode *change, char *node_uuid = NULL; pcmk__graph_action_t *down = NULL; - if(!pcmk__str_eq(op, "delete", pcmk__str_casei)) { - abort_transition(INFINITY, pcmk__graph_restart, reason, change); + if (!pcmk__str_eq(op, PCMK_VALUE_DELETE, pcmk__str_none)) { + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, + change); return; } node_uuid = extract_node_uuid(xpath); if(node_uuid == NULL) { crm_err("Could not extract node ID from %s", xpath); - abort_transition(INFINITY, pcmk__graph_restart, reason, change); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, + change); return; } down = match_down_event(node_uuid); if (down == NULL) { crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath); - abort_transition(INFINITY, pcmk__graph_restart, reason, change); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, reason, + change); } else { crm_trace("Expecting changes to %s (%s)", node_uuid, xpath); } @@ -318,7 +329,7 @@ abort_unless_down(const char *xpath, const char *op, xmlNode *change, static void process_op_deletion(const char *xpath, xmlNode *change) { - char *mutable_key = strdup(xpath); + char *mutable_key = pcmk__str_copy(xpath); char *key; char *node_uuid; @@ -338,7 +349,7 @@ process_op_deletion(const char *xpath, xmlNode *change) node_uuid = extract_node_uuid(xpath); if (confirm_cancel_action(key, node_uuid) == FALSE) { - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Resource operation removal", change); } free(mutable_key); @@ -348,13 +359,13 @@ process_op_deletion(const char *xpath, xmlNode *change) static void process_delete_diff(const char *xpath, const char *op, xmlNode *change) { - if (strstr(xpath, "/" XML_LRM_TAG_RSC_OP "[")) { + if (strstr(xpath, "/" PCMK__XE_LRM_RSC_OP "[")) { process_op_deletion(xpath, change); - } else if (strstr(xpath, "/" XML_CIB_TAG_LRM "[")) { + } else if (strstr(xpath, "/" PCMK__XE_LRM "[")) { abort_unless_down(xpath, op, change, "Resource state removal"); - } else if (strstr(xpath, "/" XML_CIB_TAG_STATE "[")) { + } else if (strstr(xpath, "/" PCMK__XE_NODE_STATE "[")) { abort_unless_down(xpath, op, change, "Node state removal"); } else { @@ 
-366,17 +377,18 @@ static void process_node_state_diff(xmlNode *state, xmlNode *change, const char *op, const char *xpath) { - xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); + xmlNode *lrm = pcmk__xe_first_child(state, PCMK__XE_LRM, NULL, NULL); - process_resource_updates(ID(state), lrm, change, op, xpath); + process_resource_updates(pcmk__xe_id(state), lrm, change, op, xpath); } static void process_status_diff(xmlNode *status, xmlNode *change, const char *op, const char *xpath) { - for (xmlNode *state = pcmk__xml_first_child(status); state != NULL; - state = pcmk__xml_next(state)) { + for (xmlNode *state = pcmk__xe_first_child(status, NULL, NULL, NULL); + state != NULL; state = pcmk__xe_next(state)) { + process_node_state_diff(state, change, op, xpath); } } @@ -385,144 +397,152 @@ static void process_cib_diff(xmlNode *cib, xmlNode *change, const char *op, const char *xpath) { - xmlNode *status = first_named_child(cib, XML_CIB_TAG_STATUS); - xmlNode *config = first_named_child(cib, XML_CIB_TAG_CONFIGURATION); + xmlNode *status = pcmk__xe_first_child(cib, PCMK_XE_STATUS, NULL, NULL); + xmlNode *config = pcmk__xe_first_child(cib, PCMK_XE_CONFIGURATION, NULL, + NULL); if (status) { process_status_diff(status, change, op, xpath); } if (config) { - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Non-status-only change", change); } } -static void -te_update_diff_v2(xmlNode *diff) +static int +te_update_diff_element_v2(xmlNode *change, void *userdata) { - crm_log_xml_trace(diff, "Patch:Raw"); - - for (xmlNode *change = pcmk__xml_first_child(diff); change != NULL; - change = pcmk__xml_next(change)) { - - xmlNode *match = NULL; - const char *name = NULL; - const char *xpath = crm_element_value(change, XML_DIFF_PATH); - - // Possible ops: create, modify, delete, move - const char *op = crm_element_value(change, XML_DIFF_OP); - - // Ignore uninteresting updates - if (op == NULL) { - continue; - - } else if (xpath == NULL) { - crm_trace("Ignoring %s change for version field", op); - continue; - - } else if ((strcmp(op, "move") == 0) - && (strstr(xpath, - "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION - "/" XML_CIB_TAG_RESOURCES) == NULL)) { - /* We still need to consider moves within the resources section, - * since they affect placement order. - */ - crm_trace("Ignoring move change at %s", xpath); - continue; + xmlNode *match = NULL; + const char *name = NULL; + const char *xpath = crm_element_value(change, PCMK_XA_PATH); + + // Possible ops: create, modify, delete, move + const char *op = crm_element_value(change, PCMK_XA_OPERATION); + + // Ignore uninteresting updates + if (op == NULL) { + return pcmk_rc_ok; + + } else if (xpath == NULL) { + crm_trace("Ignoring %s change for version field", op); + return pcmk_rc_ok; + + } else if ((strcmp(op, PCMK_VALUE_MOVE) == 0) + && (strstr(xpath, + "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION + "/" PCMK_XE_RESOURCES) == NULL)) { + /* We still need to consider moves within the resources section, + * since they affect placement order. 
+ */ + crm_trace("Ignoring move change at %s", xpath); + return pcmk_rc_ok; + } + + // Find the result of create/modify ops + if (strcmp(op, PCMK_VALUE_CREATE) == 0) { + match = change->children; + + } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { + match = pcmk__xe_first_child(change, PCMK_XE_CHANGE_RESULT, NULL, NULL); + if(match) { + match = match->children; } - // Find the result of create/modify ops - if (strcmp(op, "create") == 0) { - match = change->children; - - } else if (strcmp(op, "modify") == 0) { - match = first_named_child(change, XML_DIFF_RESULT); - if(match) { - match = match->children; - } - - } else if (!pcmk__str_any_of(op, "delete", "move", NULL)) { - crm_warn("Ignoring malformed CIB update (%s operation on %s is unrecognized)", - op, xpath); - continue; - } + } else if (!pcmk__str_any_of(op, + PCMK_VALUE_DELETE, PCMK_VALUE_MOVE, + NULL)) { + crm_warn("Ignoring malformed CIB update (%s operation on %s is unrecognized)", + op, xpath); + return pcmk_rc_ok; + } - if (match) { - if (match->type == XML_COMMENT_NODE) { - crm_trace("Ignoring %s operation for comment at %s", op, xpath); - continue; - } - name = (const char *)match->name; + if (match) { + if (match->type == XML_COMMENT_NODE) { + crm_trace("Ignoring %s operation for comment at %s", op, xpath); + return pcmk_rc_ok; } + name = (const char *)match->name; + } - crm_trace("Handling %s operation for %s%s%s", - op, (xpath? xpath : "CIB"), - (name? " matched by " : ""), (name? name : "")); + crm_trace("Handling %s operation for %s%s%s", + op, (xpath? xpath : "CIB"), + (name? " matched by " : ""), (name? name : "")); - if (strstr(xpath, "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION)) { - abort_transition(INFINITY, pcmk__graph_restart, - "Configuration change", change); - break; // Won't be packaged with operation results we may be waiting for + if (strstr(xpath, "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION)) { + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Configuration change", change); + return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for - } else if (strstr(xpath, "/" XML_CIB_TAG_TICKETS) - || pcmk__str_eq(name, XML_CIB_TAG_TICKETS, pcmk__str_none)) { - abort_transition(INFINITY, pcmk__graph_restart, - "Ticket attribute change", change); - break; // Won't be packaged with operation results we may be waiting for + } else if (strstr(xpath, "/" PCMK_XE_TICKETS) + || pcmk__str_eq(name, PCMK_XE_TICKETS, pcmk__str_none)) { + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Ticket attribute change", change); + return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for - } else if (strstr(xpath, "/" XML_TAG_TRANSIENT_NODEATTRS "[") - || pcmk__str_eq(name, XML_TAG_TRANSIENT_NODEATTRS, - pcmk__str_none)) { - abort_unless_down(xpath, op, change, "Transient attribute change"); - break; // Won't be packaged with operation results we may be waiting for + } else if (strstr(xpath, "/" PCMK__XE_TRANSIENT_ATTRIBUTES "[") + || pcmk__str_eq(name, PCMK__XE_TRANSIENT_ATTRIBUTES, + pcmk__str_none)) { + abort_unless_down(xpath, op, change, "Transient attribute change"); + return pcmk_rc_cib_modified; // Won't be packaged with operation results we may be waiting for - } else if (strcmp(op, "delete") == 0) { - process_delete_diff(xpath, op, change); + } else if (strcmp(op, PCMK_VALUE_DELETE) == 0) { + process_delete_diff(xpath, op, change); - } else if (name == NULL) { - crm_warn("Ignoring malformed CIB update (%s at %s has no result)", - 
op, xpath); + } else if (name == NULL) { + crm_warn("Ignoring malformed CIB update (%s at %s has no result)", + op, xpath); - } else if (strcmp(name, XML_TAG_CIB) == 0) { - process_cib_diff(match, change, op, xpath); + } else if (strcmp(name, PCMK_XE_CIB) == 0) { + process_cib_diff(match, change, op, xpath); - } else if (strcmp(name, XML_CIB_TAG_STATUS) == 0) { - process_status_diff(match, change, op, xpath); + } else if (strcmp(name, PCMK_XE_STATUS) == 0) { + process_status_diff(match, change, op, xpath); - } else if (strcmp(name, XML_CIB_TAG_STATE) == 0) { - process_node_state_diff(match, change, op, xpath); + } else if (strcmp(name, PCMK__XE_NODE_STATE) == 0) { + process_node_state_diff(match, change, op, xpath); - } else if (strcmp(name, XML_CIB_TAG_LRM) == 0) { - process_resource_updates(ID(match), match, change, op, xpath); + } else if (strcmp(name, PCMK__XE_LRM) == 0) { + process_resource_updates(pcmk__xe_id(match), match, change, op, + xpath); - } else if (strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { - char *local_node = pcmk__xpath_node_id(xpath, "lrm"); + } else if (strcmp(name, PCMK__XE_LRM_RESOURCES) == 0) { + char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); - process_resource_updates(local_node, match, change, op, xpath); - free(local_node); + process_resource_updates(local_node, match, change, op, xpath); + free(local_node); - } else if (strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { - char *local_node = pcmk__xpath_node_id(xpath, "lrm"); + } else if (strcmp(name, PCMK__XE_LRM_RESOURCE) == 0) { + char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); - process_lrm_resource_diff(match, local_node); - free(local_node); + process_lrm_resource_diff(match, local_node); + free(local_node); - } else if (strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { - char *local_node = pcmk__xpath_node_id(xpath, "lrm"); + } else if (strcmp(name, PCMK__XE_LRM_RSC_OP) == 0) { + char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM); - process_graph_event(match, local_node); - free(local_node); + process_graph_event(match, local_node); + free(local_node); - } else { - crm_warn("Ignoring malformed CIB update (%s at %s has unrecognized result %s)", - op, xpath, name); - } + } else { + crm_warn("Ignoring malformed CIB update (%s at %s has unrecognized result %s)", + op, xpath, name); } + + return pcmk_rc_ok; +} + +static void +te_update_diff_v2(xmlNode *diff) +{ + crm_log_xml_trace(diff, "Patch:Raw"); + pcmk__xe_foreach_child(diff, NULL, te_update_diff_element_v2, NULL); } void te_update_diff(const char *event, xmlNode * msg) { + xmlNode *wrapper = NULL; xmlNode *diff = NULL; const char *op = NULL; int rc = -EINVAL; @@ -531,7 +551,7 @@ te_update_diff(const char *event, xmlNode * msg) int p_del[] = { 0, 0, 0 }; CRM_CHECK(msg != NULL, return); - crm_element_value_int(msg, F_CIB_RC, &rc); + crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc); if (controld_globals.transition_graph == NULL) { crm_trace("No graph"); @@ -550,8 +570,10 @@ te_update_diff(const char *event, xmlNode * msg) return; } - op = crm_element_value(msg, F_CIB_OPERATION); - diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); + op = crm_element_value(msg, PCMK__XA_CIB_OP); + + wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); + diff = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); xml_patch_versions(diff, p_add, p_del); crm_debug("Processing (%s) diff: %d.%d.%d -> %d.%d.%d (%s)", op, @@ -583,7 +605,7 @@ process_te_message(xmlNode * msg, xmlNode * xml_data) CRM_CHECK(msg != NULL, return); // Transition 
requests must specify transition engine as subsystem - value = crm_element_value(msg, F_CRM_SYS_TO); + value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); if (pcmk__str_empty(value) || !pcmk__str_eq(value, CRM_SYSTEM_TENGINE, pcmk__str_none)) { crm_info("Received invalid transition request: subsystem '%s' not '" @@ -592,7 +614,7 @@ process_te_message(xmlNode * msg, xmlNode * xml_data) } // Only the lrm_invoke command is supported as a transition request - value = crm_element_value(msg, F_CRM_TASK); + value = crm_element_value(msg, PCMK__XA_CRM_TASK); if (!pcmk__str_eq(value, CRM_OP_INVOKE_LRM, pcmk__str_none)) { crm_info("Received invalid transition request: command '%s' not '" CRM_OP_INVOKE_LRM "'", pcmk__s(value, "")); @@ -600,7 +622,7 @@ process_te_message(xmlNode * msg, xmlNode * xml_data) } // Transition requests must be marked as coming from the executor - value = crm_element_value(msg, F_CRM_SYS_FROM); + value = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); if (!pcmk__str_eq(value, CRM_SYSTEM_LRMD, pcmk__str_none)) { crm_info("Received invalid transition request: from '%s' not '" CRM_SYSTEM_LRMD "'", pcmk__s(value, "")); @@ -608,10 +630,10 @@ process_te_message(xmlNode * msg, xmlNode * xml_data) } crm_debug("Processing transition request with ref='%s' origin='%s'", - pcmk__s(crm_element_value(msg, F_CRM_REFERENCE), ""), - pcmk__s(crm_element_value(msg, F_ORIG), "")); + pcmk__s(crm_element_value(msg, PCMK_XA_REFERENCE), ""), + pcmk__s(crm_element_value(msg, PCMK__XA_SRC), "")); - xpathObj = xpath_search(xml_data, "//" XML_LRM_TAG_RSC_OP); + xpathObj = xpath_search(xml_data, "//" PCMK__XE_LRM_RSC_OP); nmatches = numXpathResults(xpathObj); if (nmatches == 0) { crm_err("Received transition request with no results (bug?)"); @@ -653,9 +675,9 @@ action_timer_callback(gpointer data) stop_te_timer(action); - task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); - on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); - via_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + task = crm_element_value(action->xml, PCMK_XA_OPERATION); + on_node = crm_element_value(action->xml, PCMK__META_ON_NODE); + via_node = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (controld_globals.transition_graph->complete) { crm_notice("Node %s did not send %s result (via %s) within %dms " @@ -666,7 +688,7 @@ action_timer_callback(gpointer data) /* fail the action */ crm_err("Node %s did not send %s result (via %s) within %dms " - "(action timeout plus cluster-delay)", + "(action timeout plus " PCMK_OPT_CLUSTER_DELAY ")", (on_node? on_node : ""), (task? task : "unknown action"), (via_node? via_node : "controller"), (action->timeout @@ -676,7 +698,8 @@ action_timer_callback(gpointer data) pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); te_action_confirmed(action, controld_globals.transition_graph); - abort_transition(INFINITY, pcmk__graph_restart, "Action lost", NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Action lost", NULL); // Record timeout in the CIB if appropriate if ((action->type == pcmk__rsc_graph_action) diff --git a/daemons/controld/controld_te_events.c b/daemons/controld/controld_te_events.c index 28977c0..49c09f6 100644 --- a/daemons/controld/controld_te_events.c +++ b/daemons/controld/controld_te_events.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -12,12 +12,11 @@ #include #include #include -#include #include #include -#include +#include #include /*! @@ -109,17 +108,22 @@ fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node) || pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { continue; } else if (action->type == pcmk__cluster_graph_action) { - const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); + const char *task = crm_element_value(action->xml, + PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) { continue; } } - target_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); - router = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); + target_uuid = crm_element_value(action->xml, + PCMK__META_ON_NODE_UUID); + router = crm_element_value(action->xml, PCMK__XA_ROUTER_NODE); if (router) { - crm_node_t *node = crm_get_peer(0, router); + const crm_node_t *node = + pcmk__get_node(0, router, NULL, + pcmk__node_search_cluster_member); + if (node) { router_uuid = node->uuid; } @@ -134,10 +138,15 @@ fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node) if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { crm_notice("Action %d (%s) was pending on %s (offline)", - action->id, crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY), down_node); + action->id, + crm_element_value(action->xml, + PCMK__XA_OPERATION_KEY), + down_node); } else { crm_info("Action %d (%s) is scheduled for %s (offline)", - action->id, crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY), down_node); + action->id, + crm_element_value(action->xml, PCMK__XA_OPERATION_KEY), + down_node); } } } @@ -145,8 +154,8 @@ fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node) if (last_action != NULL) { crm_info("Node %s shutdown resulted in un-runnable actions", down_node); - abort_transition(INFINITY, pcmk__graph_restart, "Node failure", - last_action); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Node failure", last_action); return TRUE; } @@ -176,9 +185,9 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, char *rsc_id = NULL; const char *value = NULL; - const char *id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); - const char *on_uname = crm_peer_uname(event_node_uuid); - const char *origin = crm_element_value(event, XML_ATTR_ORIGIN); + const char *id = crm_element_value(event, PCMK__XA_OPERATION_KEY); + const char *on_uname = pcmk__node_name_from_uuid(event_node_uuid); + const char *origin = crm_element_value(event, PCMK_XA_CRM_DEBUG_ORIGIN); // Nothing needs to be done for success or status refresh if (rc == target_rc) { @@ -192,7 +201,7 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, /* Sanity check */ CRM_CHECK(on_uname != NULL, return TRUE); CRM_CHECK(parse_op_key(id, &rsc_id, &task, &interval_ms), - crm_err("Couldn't parse: %s", ID(event)); goto bail); + crm_err("Couldn't parse: %s", pcmk__xe_id(event)); goto bail); /* Decide whether update is necessary and what value to use */ if ((interval_ms > 0) @@ -203,12 +212,12 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_start_offset, - CRM_INFINITY_S); + PCMK_VALUE_INFINITY); } else if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_none)) { do_update = TRUE; value = pcmk__s(controld_globals.transition_graph->failed_stop_offset, - 
CRM_INFINITY_S); + PCMK_VALUE_INFINITY); } if (do_update) { @@ -224,7 +233,7 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, // Fail count will be either incremented or set to infinity if (!pcmk_str_is_infinity(value)) { - value = XML_NVPAIR_ATTR_VALUE "++"; + value = PCMK_XA_VALUE "++"; } if (g_hash_table_lookup(crm_remote_peer_cache, event_node_uuid)) { @@ -237,8 +246,7 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, /* Update the fail count, if we're not ignoring failures */ if (!ignore_failures) { - fail_pair = calloc(1, sizeof(pcmk__attrd_query_pair_t)); - CRM_ASSERT(fail_pair != NULL); + fail_pair = pcmk__assert_alloc(1, sizeof(pcmk__attrd_query_pair_t)); fail_name = pcmk__failcount_name(rsc_id, task, interval_ms); fail_pair->name = fail_name; @@ -251,8 +259,7 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc, /* Update the last failure time (even if we're ignoring failures, * so that failure can still be detected and shown, e.g. by crm_mon) */ - last_pair = calloc(1, sizeof(pcmk__attrd_query_pair_t)); - CRM_ASSERT(last_pair != NULL); + last_pair = pcmk__assert_alloc(1, sizeof(pcmk__attrd_query_pair_t)); last_name = pcmk__lastfailure_name(rsc_id, task, interval_ms); last_pair->name = last_name; @@ -313,18 +320,18 @@ get_cancel_action(const char *id, const char *node) const char *target = NULL; pcmk__graph_action_t *action = (pcmk__graph_action_t *) gIter2->data; - task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); + task = crm_element_value(action->xml, PCMK_XA_OPERATION); if (!pcmk__str_eq(PCMK_ACTION_CANCEL, task, pcmk__str_casei)) { continue; } - task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); + task = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY); if (!pcmk__str_eq(task, id, pcmk__str_casei)) { crm_trace("Wrong key %s for %s on %s", task, id, node); continue; } - target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); + target = crm_element_value(action->xml, PCMK__META_ON_NODE_UUID); if (node && !pcmk__str_eq(target, node, pcmk__str_casei)) { crm_trace("Wrong node %s for %s on %s", target, id, node); continue; @@ -348,8 +355,8 @@ confirm_cancel_action(const char *id, const char *node_id) if (cancel == NULL) { return FALSE; } - op_key = crm_element_value(cancel->xml, XML_LRM_ATTR_TASK_KEY); - node_name = crm_element_value(cancel->xml, XML_LRM_ATTR_TARGET); + op_key = crm_element_value(cancel->xml, PCMK__XA_OPERATION_KEY); + node_name = crm_element_value(cancel->xml, PCMK__META_ON_NODE); stop_te_timer(cancel); te_action_confirmed(cancel, controld_globals.transition_graph); @@ -360,8 +367,8 @@ confirm_cancel_action(const char *id, const char *node_id) } /* downed nodes are listed like: ... */ -#define XPATH_DOWNED "//" XML_GRAPH_TAG_DOWNED \ - "/" XML_CIB_TAG_NODE "[@" XML_ATTR_ID "='%s']" +#define XPATH_DOWNED "//" PCMK__XE_DOWNED \ + "/" PCMK_XE_NODE "[@" PCMK_XA_ID "='%s']" /*! 
* \brief Find a transition event that would have made a specified node down @@ -405,7 +412,8 @@ match_down_event(const char *target) if (match != NULL) { crm_debug("Shutdown action %d (%s) found for node %s", match->id, - crm_element_value(match->xml, XML_LRM_ATTR_TASK_KEY), target); + crm_element_value(match->xml, PCMK__XA_OPERATION_KEY), + target); } else { crm_debug("No reason to expect node %s to be down", target); } @@ -434,20 +442,20 @@ process_graph_event(xmlNode *event, const char *event_node) */ - magic = crm_element_value(event, XML_ATTR_TRANSITION_KEY); + magic = crm_element_value(event, PCMK__XA_TRANSITION_KEY); if (magic == NULL) { /* non-change */ return; } - crm_element_value_int(event, XML_LRM_ATTR_OPSTATUS, &status); + crm_element_value_int(event, PCMK__XA_OP_STATUS, &status); if (status == PCMK_EXEC_PENDING) { return; } - id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); - crm_element_value_int(event, XML_LRM_ATTR_RC, &rc); - crm_element_value_int(event, XML_LRM_ATTR_CALLID, &callid); + id = crm_element_value(event, PCMK__XA_OPERATION_KEY); + crm_element_value_int(event, PCMK__XA_RC_CODE, &rc); + crm_element_value_int(event, PCMK__XA_CALL_ID, &callid); rc = pcmk__effective_rc(rc); @@ -456,7 +464,8 @@ process_graph_event(xmlNode *event, const char *event_node) // decode_transition_key() already logged the bad key crm_err("Can't process action %s result: Incompatible versions? " CRM_XS " call-id=%d", id, callid); - abort_transition(INFINITY, pcmk__graph_restart, "Bad event", event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Bad event", event); return; } @@ -468,14 +477,15 @@ process_graph_event(xmlNode *event, const char *event_node) goto bail; } desc = "initiated outside of the cluster"; - abort_transition(INFINITY, pcmk__graph_restart, "Unexpected event", - event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Unexpected event", event); } else if ((action_num < 0) || !pcmk__str_eq(update_te_uuid, controld_globals.te_uuid, pcmk__str_none)) { desc = "initiated by a different DC"; - abort_transition(INFINITY, pcmk__graph_restart, "Foreign event", event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Foreign event", event); } else if ((controld_globals.transition_graph->id != transition_num) || controld_globals.transition_graph->complete) { @@ -496,16 +506,17 @@ process_graph_event(xmlNode *event, const char *event_node) } desc = "arrived after initial scheduling"; - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Change in recurring result", event); } else if (controld_globals.transition_graph->id != transition_num) { desc = "arrived really late"; - abort_transition(INFINITY, pcmk__graph_restart, "Old event", event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Old event", event); } else { desc = "arrived late"; - abort_transition(INFINITY, pcmk__graph_restart, "Inactive graph", - event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Inactive graph", event); } } else { @@ -515,8 +526,8 @@ process_graph_event(xmlNode *event, const char *event_node) if (action == NULL) { // Should never happen desc = "unknown"; - abort_transition(INFINITY, pcmk__graph_restart, "Unknown event", - event); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Unknown event", event); } else if (pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { /* Nothing further needs to be done if the action has already been @@ -533,7 
+544,8 @@ process_graph_event(xmlNode *event, const char *event_node) * (This is the only case where desc == NULL.) */ - if (pcmk__str_eq(crm_meta_value(action->params, XML_OP_ATTR_ON_FAIL), "ignore", pcmk__str_casei)) { + if (pcmk__str_eq(crm_meta_value(action->params, PCMK_META_ON_FAIL), + PCMK_VALUE_IGNORE, pcmk__str_casei)) { ignore_failures = TRUE; } else if (rc != target_rc) { @@ -553,7 +565,7 @@ process_graph_event(xmlNode *event, const char *event_node) if (id == NULL) { id = "unknown action"; } - uname = crm_element_value(event, XML_LRM_ATTR_TARGET); + uname = crm_element_value(event, PCMK__META_ON_NODE); if (uname == NULL) { uname = "unknown node"; } diff --git a/daemons/controld/controld_te_utils.c b/daemons/controld/controld_te_utils.c index 5a9f029..3e71209 100644 --- a/daemons/controld/controld_te_utils.c +++ b/daemons/controld/controld_te_utils.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -9,7 +9,6 @@ #include #include -#include #include #include @@ -192,8 +191,8 @@ node_pending_timer_popped(gpointer key) return FALSE; } - crm_warn("Node with id '%s' pending timed out (%us) on joining the process " - "group", + crm_warn("Node with " PCMK_XA_ID " '%s' pending timed out (%us) " + "on joining the process group", (const char *) key, controld_globals.node_pending_timeout); if (controld_globals.node_pending_timeout > 0) { @@ -224,22 +223,19 @@ init_node_pending_timer(const crm_node_t *node, guint timeout) return; } - crm_notice("Waiting for pending %s with id '%s' to join the process " - "group (timeout=%us)", + crm_notice("Waiting for pending %s with " PCMK_XA_ID " '%s' " + "to join the process group (timeout=%us)", node->uname ? node->uname : "node", node->uuid, controld_globals.node_pending_timeout); - node_pending_timer = calloc(1, sizeof(struct abort_timer_s)); - CRM_ASSERT(node_pending_timer != NULL); + key = pcmk__str_copy(node->uuid); + node_pending_timer = pcmk__assert_alloc(1, sizeof(struct abort_timer_s)); node_pending_timer->aborted = FALSE; - node_pending_timer->priority = INFINITY; + node_pending_timer->priority = PCMK_SCORE_INFINITY; node_pending_timer->action = pcmk__graph_restart; node_pending_timer->text = "Node pending timed out"; - key = strdup(node->uuid); - CRM_ASSERT(key != NULL); - g_hash_table_replace(node_pending_timers, key, node_pending_timer); node_pending_timer->id = g_timeout_add_seconds(timeout, @@ -264,8 +260,8 @@ controld_node_pending_timer(const crm_node_t *node) long long remaining_timeout = 0; /* If the node is not an active cluster node, is leaving the cluster, or is - * already part of CPG, or node-pending-timeout is disabled, free any - * node pending timer for it. + * already part of CPG, or PCMK_OPT_NODE_PENDING_TIMEOUT is disabled, free + * any node pending timer for it. 
*/ if (pcmk_is_set(node->flags, crm_remote_node) || (node->when_member <= 1) || (node->when_online > 0) @@ -386,7 +382,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, const xmlNode *search = NULL; for(search = reason; search; search = search->parent) { - if (pcmk__xe_is(search, XML_TAG_DIFF)) { + if (pcmk__xe_is(search, PCMK_XE_DIFF)) { diff = search; break; } @@ -395,7 +391,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, if(diff) { xml_patch_versions(diff, add, del); for(search = reason; search; search = search->parent) { - if (pcmk__xe_is(search, XML_DIFF_CHANGE)) { + if (pcmk__xe_is(search, PCMK_XE_CHANGE)) { change = search; break; } @@ -417,21 +413,22 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, do_crm_log(level, "Transition %d aborted by %s.%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, reason->name, - ID(reason), abort_text, add[0], add[1], add[2], fn, line, - (const char *) local_path->str, + pcmk__xe_id(reason), abort_text, add[0], add[1], add[2], fn, + line, (const char *) local_path->str, pcmk__btoa(controld_globals.transition_graph->complete)); g_string_free(local_path, TRUE); } else { - const char *op = crm_element_value(change, XML_DIFF_OP); - const char *path = crm_element_value(change, XML_DIFF_PATH); + const char *op = crm_element_value(change, PCMK_XA_OPERATION); + const char *path = crm_element_value(change, PCMK_XA_PATH); if(change == reason) { - if(strcmp(op, "create") == 0) { + if (strcmp(op, PCMK_VALUE_CREATE) == 0) { reason = reason->children; - } else if(strcmp(op, "modify") == 0) { - reason = first_named_child(reason, XML_DIFF_RESULT); + } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { + reason = pcmk__xe_first_child(reason, PCMK_XE_CHANGE_RESULT, + NULL, NULL); if(reason) { reason = reason->children; } @@ -439,7 +436,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, CRM_CHECK(reason != NULL, goto done); } - if(strcmp(op, "delete") == 0) { + if (strcmp(op, PCMK_VALUE_DELETE) == 0) { const char *shortpath = strrchr(path, '/'); do_crm_log(level, "Transition %d aborted by deletion of %s: %s " @@ -449,40 +446,42 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); - } else if (pcmk__xe_is(reason, XML_CIB_TAG_NVPAIR)) { + } else if (pcmk__xe_is(reason, PCMK_XE_NVPAIR)) { do_crm_log(level, "Transition %d aborted by %s doing %s %s=%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, - crm_element_value(reason, XML_ATTR_ID), op, - crm_element_value(reason, XML_NVPAIR_ATTR_NAME), - crm_element_value(reason, XML_NVPAIR_ATTR_VALUE), + crm_element_value(reason, PCMK_XA_ID), op, + crm_element_value(reason, PCMK_XA_NAME), + crm_element_value(reason, PCMK_XA_VALUE), abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); - } else if (pcmk__xe_is(reason, XML_LRM_TAG_RSC_OP)) { - const char *magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC); + } else if (pcmk__xe_is(reason, PCMK__XE_LRM_RSC_OP)) { + const char *magic = crm_element_value(reason, + PCMK__XA_TRANSITION_MAGIC); do_crm_log(level, "Transition %d aborted by operation %s '%s' on %s: %s " CRM_XS " magic=%s cib=%d.%d.%d source=%s:%d complete=%s", controld_globals.transition_graph->id, - 
crm_element_value(reason, XML_LRM_ATTR_TASK_KEY), op, - crm_element_value(reason, XML_LRM_ATTR_TARGET), abort_text, + crm_element_value(reason, PCMK__XA_OPERATION_KEY), op, + crm_element_value(reason, PCMK__META_ON_NODE), + abort_text, magic, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else if (pcmk__str_any_of((const char *) reason->name, - XML_CIB_TAG_STATE, XML_CIB_TAG_NODE, NULL)) { - const char *uname = crm_peer_uname(ID(reason)); + PCMK__XE_NODE_STATE, PCMK_XE_NODE, NULL)) { + const char *uname = pcmk__node_name_from_uuid(pcmk__xe_id(reason)); do_crm_log(level, "Transition %d aborted by %s '%s' on %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s", controld_globals.transition_graph->id, - reason->name, op, pcmk__s(uname, ID(reason)), + reason->name, op, pcmk__s(uname, pcmk__xe_id(reason)), abort_text, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else { - const char *id = ID(reason); + const char *id = pcmk__xe_id(reason); do_crm_log(level, "Transition %d aborted by %s.%s '%s': %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", diff --git a/daemons/controld/controld_throttle.c b/daemons/controld/controld_throttle.c index a4775e5..08ec329 100644 --- a/daemons/controld/controld_throttle.c +++ b/daemons/controld/controld_throttle.c @@ -1,5 +1,5 @@ /* - * Copyright 2013-2021 the Pacemaker project contributors + * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include @@ -160,7 +160,7 @@ throttle_cib_load(float *load) } if(fgets(buffer, sizeof(buffer), stream)) { - char *comm = calloc(1, 256); + char *comm = pcmk__assert_alloc(1, 256); char state = 0; int rc = 0, pid = 0, ppid = 0, pgrp = 0, session = 0, tty_nr = 0, tpgid = 0; unsigned long flags = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0, utime = 0, stime = 0; @@ -368,10 +368,10 @@ throttle_send_command(enum throttle_state_e mode) last = mode; xml = create_request(CRM_OP_THROTTLE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); - crm_xml_add_int(xml, F_CRM_THROTTLE_MODE, mode); - crm_xml_add_int(xml, F_CRM_THROTTLE_MAX, throttle_job_max); + crm_xml_add_int(xml, PCMK__XA_CRM_LIMIT_MODE, mode); + crm_xml_add_int(xml, PCMK__XA_CRM_LIMIT_MAX, throttle_job_max); - send_cluster_message(NULL, crm_msg_crmd, xml, TRUE); + pcmk__cluster_send_message(NULL, crm_msg_crmd, xml); free_xml(xml); } } @@ -401,7 +401,8 @@ throttle_set_load_target(float target) * \internal * \brief Update the maximum number of simultaneous jobs * - * \param[in] preference Cluster-wide node-action-limit from the CIB + * \param[in] preference Cluster-wide \c PCMK_OPT_NODE_ACTION_LIMIT from the + * CIB */ static void throttle_update_job_max(const char *preference) @@ -416,7 +417,7 @@ throttle_update_job_max(const char *preference) pcmk__scan_ll(preference, &max, 0LL); } if (max > 0) { - throttle_job_max = (int) max; + throttle_job_max = (max >= INT_MAX)? 
INT_MAX : (int) max; } else { // Default is based on the number of cores detected throttle_job_max = 2 * pcmk__procfs_num_cores(); @@ -444,13 +445,13 @@ throttle_init(void) void controld_configure_throttle(GHashTable *options) { - const char *value = g_hash_table_lookup(options, "load-threshold"); + const char *value = g_hash_table_lookup(options, PCMK_OPT_LOAD_THRESHOLD); if (value != NULL) { throttle_set_load_target(strtof(value, NULL) / 100.0); } - value = g_hash_table_lookup(options, "node-action-limit"); + value = g_hash_table_lookup(options, PCMK_OPT_NODE_ACTION_LIMIT); throttle_update_job_max(value); } @@ -473,7 +474,7 @@ throttle_get_total_job_limit(int l) /* Cluster-wide limit */ GHashTableIter iter; int limit = l; - int peers = crm_active_peers(); + int peers = pcmk__cluster_num_active_nodes(); struct throttle_record_s *r = NULL; g_hash_table_iter_init(&iter, throttle_records); @@ -497,13 +498,12 @@ throttle_get_total_job_limit(int l) } } if(limit == l) { - /* crm_trace("No change to batch-limit=%d", limit); */ } else if(l == 0) { - crm_trace("Using batch-limit=%d", limit); + crm_trace("Using " PCMK_OPT_BATCH_LIMIT "=%d", limit); } else { - crm_trace("Using batch-limit=%d instead of %d", limit, l); + crm_trace("Using " PCMK_OPT_BATCH_LIMIT "=%d instead of %d", limit, l); } return limit; } @@ -516,8 +516,8 @@ throttle_get_job_limit(const char *node) r = g_hash_table_lookup(throttle_records, node); if(r == NULL) { - r = calloc(1, sizeof(struct throttle_record_s)); - r->node = strdup(node); + r = pcmk__assert_alloc(1, sizeof(struct throttle_record_s)); + r->node = pcmk__str_copy(node); r->mode = throttle_low; r->max = throttle_job_max; crm_trace("Defaulting to local values for unknown node %s", node); @@ -552,16 +552,16 @@ throttle_update(xmlNode *xml) int max = 0; int mode = 0; struct throttle_record_s *r = NULL; - const char *from = crm_element_value(xml, F_CRM_HOST_FROM); + const char *from = crm_element_value(xml, PCMK__XA_SRC); - crm_element_value_int(xml, F_CRM_THROTTLE_MODE, &mode); - crm_element_value_int(xml, F_CRM_THROTTLE_MAX, &max); + crm_element_value_int(xml, PCMK__XA_CRM_LIMIT_MODE, &mode); + crm_element_value_int(xml, PCMK__XA_CRM_LIMIT_MAX, &max); r = g_hash_table_lookup(throttle_records, from); if(r == NULL) { - r = calloc(1, sizeof(struct throttle_record_s)); - r->node = strdup(from); + r = pcmk__assert_alloc(1, sizeof(struct throttle_record_s)); + r->node = pcmk__str_copy(from); g_hash_table_insert(throttle_records, r->node, r); } diff --git a/daemons/controld/controld_timers.c b/daemons/controld/controld_timers.c index a65bef5..0d387b9 100644 --- a/daemons/controld/controld_timers.c +++ b/daemons/controld/controld_timers.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -13,7 +13,7 @@ #include #include -#include +#include #include //! FSA mainloop timer type @@ -38,10 +38,10 @@ static fsa_timer_t *election_timer = NULL; //! Delay start of new transition with expectation something else might happen static fsa_timer_t *transition_timer = NULL; -//! join-integration-timeout +//! \c PCMK_OPT_JOIN_INTEGRATION_TIMEOUT static fsa_timer_t *integration_timer = NULL; -//! join-finalization-timeout +//! 
\c PCMK_OPT_JOIN_FINALIZATION_TIMEOUT static fsa_timer_t *finalization_timer = NULL; // Wait for DC to stop all resources and give us the all-clear to shut down @@ -229,40 +229,13 @@ crm_timer_popped(gpointer data) bool controld_init_fsa_timers(void) { - transition_timer = calloc(1, sizeof(fsa_timer_t)); - if (transition_timer == NULL) { - return FALSE; - } - - integration_timer = calloc(1, sizeof(fsa_timer_t)); - if (integration_timer == NULL) { - return FALSE; - } - - finalization_timer = calloc(1, sizeof(fsa_timer_t)); - if (finalization_timer == NULL) { - return FALSE; - } - - election_timer = calloc(1, sizeof(fsa_timer_t)); - if (election_timer == NULL) { - return FALSE; - } - - shutdown_escalation_timer = calloc(1, sizeof(fsa_timer_t)); - if (shutdown_escalation_timer == NULL) { - return FALSE; - } - - wait_timer = calloc(1, sizeof(fsa_timer_t)); - if (wait_timer == NULL) { - return FALSE; - } - - recheck_timer = calloc(1, sizeof(fsa_timer_t)); - if (recheck_timer == NULL) { - return FALSE; - } + transition_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + integration_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + finalization_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + election_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + shutdown_escalation_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + wait_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); + recheck_timer = pcmk__assert_alloc(1, sizeof(fsa_timer_t)); election_timer->source_id = 0; election_timer->period_ms = 0; @@ -332,30 +305,30 @@ controld_configure_fsa_timers(GHashTable *options) const char *value = NULL; // Election timer - value = g_hash_table_lookup(options, XML_CONFIG_ATTR_DC_DEADTIME); - election_timer->period_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_DC_DEADTIME); + pcmk_parse_interval_spec(value, &(election_timer->period_ms)); // Integration timer - value = g_hash_table_lookup(options, "join-integration-timeout"); - integration_timer->period_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_JOIN_INTEGRATION_TIMEOUT); + pcmk_parse_interval_spec(value, &(integration_timer->period_ms)); // Finalization timer - value = g_hash_table_lookup(options, "join-finalization-timeout"); - finalization_timer->period_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_JOIN_FINALIZATION_TIMEOUT); + pcmk_parse_interval_spec(value, &(finalization_timer->period_ms)); // Shutdown escalation timer - value = g_hash_table_lookup(options, XML_CONFIG_ATTR_FORCE_QUIT); - shutdown_escalation_timer->period_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_SHUTDOWN_ESCALATION); + pcmk_parse_interval_spec(value, &(shutdown_escalation_timer->period_ms)); crm_debug("Shutdown escalation occurs if DC has not responded to request " "in %ums", shutdown_escalation_timer->period_ms); // Transition timer - value = g_hash_table_lookup(options, "transition-delay"); - transition_timer->period_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_TRANSITION_DELAY); + pcmk_parse_interval_spec(value, &(transition_timer->period_ms)); // Recheck interval - value = g_hash_table_lookup(options, XML_CONFIG_ATTR_RECHECK); - recheck_interval_ms = crm_parse_interval_spec(value); + value = g_hash_table_lookup(options, PCMK_OPT_CLUSTER_RECHECK_INTERVAL); + pcmk_parse_interval_spec(value, &recheck_interval_ms); crm_debug("Re-run scheduler after %dms of inactivity", 
recheck_interval_ms); } diff --git a/daemons/controld/controld_transition.c b/daemons/controld/controld_transition.c index 897c6d3..184253d 100644 --- a/daemons/controld/controld_transition.c +++ b/daemons/controld/controld_transition.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -41,7 +40,8 @@ do_te_control(long long action, controld_globals.transition_graph = NULL; if (cib_conn != NULL) { - cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY, + cib_conn->cmds->del_notify_callback(cib_conn, + PCMK__VALUE_CIB_DIFF_NOTIFY, te_update_diff); } @@ -71,12 +71,11 @@ do_te_control(long long action, crm_err("Could not set CIB callbacks"); init_ok = FALSE; - } else { - if (cib_conn->cmds->add_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY, - te_update_diff) != pcmk_ok) { - crm_err("Could not set CIB notification callback"); - init_ok = FALSE; - } + } else if (cib_conn->cmds->add_notify_callback(cib_conn, + PCMK__VALUE_CIB_DIFF_NOTIFY, + te_update_diff) != pcmk_ok) { + crm_err("Could not set CIB notification callback"); + init_ok = FALSE; } if (init_ok) { @@ -110,13 +109,15 @@ do_te_invoke(long long action, if (action & A_TE_CANCEL) { crm_debug("Cancelling the transition: %sactive", controld_globals.transition_graph->complete? "in" : ""); - abort_transition(INFINITY, pcmk__graph_restart, "Peer Cancelled", NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Peer Cancelled", NULL); if (!controld_globals.transition_graph->complete) { crmd_fsa_stall(FALSE); } } else if (action & A_TE_HALT) { - abort_transition(INFINITY, pcmk__graph_wait, "Peer Halt", NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_wait, "Peer Halt", + NULL); if (!controld_globals.transition_graph->complete) { crmd_fsa_stall(FALSE); } @@ -124,11 +125,11 @@ do_te_invoke(long long action, } else if (action & A_TE_INVOKE) { ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); xmlNode *graph_data = input->xml; - const char *ref = crm_element_value(input->msg, XML_ATTR_REFERENCE); - const char *graph_file = crm_element_value(input->msg, F_CRM_TGRAPH); - const char *graph_input = crm_element_value(input->msg, F_CRM_TGRAPH_INPUT); + const char *ref = crm_element_value(input->msg, PCMK_XA_REFERENCE); + const char *graph_input = crm_element_value(input->msg, + PCMK__XA_CRM_TGRAPH_IN); - if (graph_file == NULL && graph_data == NULL) { + if (graph_data == NULL) { crm_log_xml_err(input->msg, "Bad command"); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); return; @@ -136,8 +137,8 @@ do_te_invoke(long long action, if (!controld_globals.transition_graph->complete) { crm_info("Another transition is already active"); - abort_transition(INFINITY, pcmk__graph_restart, "Transition Active", - NULL); + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, + "Transition Active", NULL); return; } @@ -147,14 +148,10 @@ do_te_invoke(long long action, crm_info("Transition is redundant: %s expected but %s received", pcmk__s(controld_globals.fsa_pe_ref, "no reference"), pcmk__s(ref, "no reference")); - abort_transition(INFINITY, pcmk__graph_restart, + abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Transition Redundant", NULL); } - if (graph_data == NULL && graph_file != NULL) { - graph_data = filename2xml(graph_file); - } - if (controld_is_started_transition_timer()) { 
crm_debug("The transitioner wait for a transition timer"); return; diff --git a/daemons/controld/controld_utils.c b/daemons/controld/controld_utils.c index 9b306ee..fc0a8fd 100644 --- a/daemons/controld/controld_utils.c +++ b/daemons/controld/controld_utils.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,6 @@ #include #include -#include #include #include @@ -694,8 +693,8 @@ update_dc(xmlNode * msg) if (msg != NULL) { gboolean invalid = FALSE; - dc_version = crm_element_value(msg, F_CRM_VERSION); - welcome_from = crm_element_value(msg, F_CRM_HOST_FROM); + dc_version = crm_element_value(msg, PCMK_XA_VERSION); + welcome_from = crm_element_value(msg, PCMK__XA_SRC); CRM_CHECK(dc_version != NULL, return FALSE); CRM_CHECK(welcome_from != NULL, return FALSE); @@ -734,7 +733,8 @@ update_dc(xmlNode * msg) /* do nothing */ } else if (controld_globals.dc_name != NULL) { - crm_node_t *dc_node = crm_get_peer(0, controld_globals.dc_name); + crm_node_t *dc_node = pcmk__get_node(0, controld_globals.dc_name, NULL, + pcmk__node_search_cluster_member); crm_info("Set DC to %s (%s)", controld_globals.dc_name, @@ -828,10 +828,10 @@ get_node_id(xmlNode *lrm_rsc_op) { xmlNode *node = lrm_rsc_op; - while ((node != NULL) && !pcmk__xe_is(node, XML_CIB_TAG_STATE)) { + while ((node != NULL) && !pcmk__xe_is(node, PCMK__XE_NODE_STATE)) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); - return ID(node); + return pcmk__xe_id(node); } diff --git a/daemons/controld/pacemaker-controld.c b/daemons/controld/pacemaker-controld.c index e4a72c2..913518b 100644 --- a/daemons/controld/pacemaker-controld.c +++ b/daemons/controld/pacemaker-controld.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -46,11 +46,23 @@ static pcmk__supported_format_t formats[] = { { NULL, NULL, NULL } }; +/* @COMPAT Deprecated since 2.1.8. Use pcmk_list_cluster_options() or + * crm_attribute --list-options=cluster instead of querying daemon metadata. 
+ */ +static int +controld_metadata(pcmk__output_t *out) +{ + return pcmk__daemon_metadata(out, "pacemaker-controld", + "Pacemaker controller options", + "Cluster options used by Pacemaker's " + "controller", + pcmk__opt_controld); +} + static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - return pcmk__build_arg_context(args, "text (default), xml", group, - "[metadata]"); + return pcmk__build_arg_context(args, "text (default), xml", group, NULL); } int @@ -96,8 +108,14 @@ main(int argc, char **argv) if ((g_strv_length(processed_args) >= 2) && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) { - crmd_metadata(); + initialize = false; + rc = controld_metadata(out); + if (rc != pcmk_rc_ok) { + exit_code = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Unable to display metadata: %s", pcmk_rc_str(rc)); + } goto done; } @@ -178,7 +196,7 @@ crmd_init(void) init_dotfile(); register_fsa_input(C_STARTUP, I_STARTUP, NULL); - crm_peer_init(); + pcmk__cluster_init_node_caches(); state = s_crmd_fsa(C_STARTUP); if (state == S_PENDING || state == S_STARTING) { diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h index 2334cce..ba8dc8f 100644 --- a/daemons/controld/pacemaker-controld.h +++ b/daemons/controld/pacemaker-controld.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -28,7 +28,6 @@ # define controld_trigger_config() \ controld_trigger_config_as(__func__, __LINE__) -void crmd_metadata(void); void controld_trigger_config_as(const char *fn, int line); void controld_election_init(const char *uname); void controld_configure_election(GHashTable *options); diff --git a/daemons/execd/Makefile.am b/daemons/execd/Makefile.am index ab8544f..ce0e161 100644 --- a/daemons/execd/Makefile.am +++ b/daemons/execd/Makefile.am @@ -44,12 +44,14 @@ pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_remoted_LDADD = $(top_builddir)/lib/fencing/libstonithd.la pacemaker_remoted_LDADD += $(top_builddir)/lib/services/libcrmservice.la +pacemaker_remoted_LDADD += $(top_builddir)/lib/cib/libcib.la pacemaker_remoted_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la pacemaker_remoted_LDADD += $(top_builddir)/lib/common/libcrmcommon.la pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \ remoted_tls.c \ remoted_pidone.c \ - remoted_proxy.c + remoted_proxy.c \ + remoted_schemas.c endif cts_exec_helper_LDADD = $(top_builddir)/lib/pengine/libpe_status.la diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c index 6ebbedf..b8f4407 100644 --- a/daemons/execd/cts-exec-helper.c +++ b/daemons/execd/cts-exec-helper.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -56,8 +56,8 @@ static struct { static gboolean interval_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - options.interval_ms = crm_parse_interval_spec(optarg); - return errno == 0; + return pcmk_parse_interval_spec(optarg, + &options.interval_ms) == pcmk_rc_ok; } static gboolean @@ -461,9 +461,9 @@ generate_params(void) if (rc != pcmk_rc_ok) { return rc; } - if (!cli_config_update(&cib_xml_copy, NULL, FALSE)) { - crm_err("Could not update CIB"); - return pcmk_rc_cib_corrupt; + rc = pcmk_update_configured_schema(&cib_xml_copy, false); + if (rc != pcmk_rc_ok) { + return rc; } // Calculate cluster status @@ -472,7 +472,7 @@ generate_params(void) crm_crit("Could not allocate scheduler data"); return ENOMEM; } - pe__set_working_set_flags(scheduler, + pcmk__set_scheduler_flags(scheduler, pcmk_sched_no_counts|pcmk_sched_no_compat); scheduler->input = cib_xml_copy; scheduler->now = crm_time_new(NULL); diff --git a/daemons/execd/execd_alerts.c b/daemons/execd/execd_alerts.c index 5944d93..362f7a5 100644 --- a/daemons/execd/execd_alerts.c +++ b/daemons/execd/execd_alerts.c @@ -1,5 +1,5 @@ /* - * Copyright 2016-2022 the Pacemaker project contributors + * Copyright 2016-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "pacemaker-execd.h" @@ -105,9 +105,11 @@ process_lrmd_alert_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) { static int alert_sequence_no = 0; - xmlNode *alert_xml = get_xpath_object("//" F_LRMD_ALERT, request, LOG_ERR); - const char *alert_id = crm_element_value(alert_xml, F_LRMD_ALERT_ID); - const char *alert_path = crm_element_value(alert_xml, F_LRMD_ALERT_PATH); + xmlNode *alert_xml = get_xpath_object("//" PCMK__XE_LRMD_ALERT, request, + LOG_ERR); + const char *alert_id = crm_element_value(alert_xml, PCMK__XA_LRMD_ALERT_ID); + const char *alert_path = crm_element_value(alert_xml, + PCMK__XA_LRMD_ALERT_PATH); svc_action_t *action = NULL; int alert_timeout = 0; int rc = pcmk_ok; @@ -116,13 +118,14 @@ process_lrmd_alert_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) if ((alert_id == NULL) || (alert_path == NULL) || (client == NULL) || (client->id == NULL)) { /* hint static analyzer */ - return -EINVAL; + rc = -EINVAL; + goto err; } if (draining_alerts) { return pcmk_ok; } - crm_element_value_int(alert_xml, F_LRMD_TIMEOUT, &alert_timeout); + crm_element_value_int(alert_xml, PCMK__XA_LRMD_TIMEOUT, &alert_timeout); crm_info("Executing alert %s for %s", alert_id, client->id); @@ -130,20 +133,11 @@ process_lrmd_alert_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) pcmk__add_alert_key_int(params, PCMK__alert_key_node_sequence, ++alert_sequence_no); - cb_data = calloc(1, sizeof(struct alert_cb_s)); - if (cb_data == NULL) { - rc = -errno; - goto err; - } + cb_data = pcmk__assert_alloc(1, sizeof(struct alert_cb_s)); - /* coverity[deref_ptr] False Positive */ - cb_data->client_id = strdup(client->id); - if (cb_data->client_id == NULL) { - rc = -errno; - goto err; - } + cb_data->client_id = pcmk__str_copy(client->id); - crm_element_value_int(request, F_LRMD_CALLID, &(cb_data->call_id)); + crm_element_value_int(request, PCMK__XA_LRMD_CALLID, &(cb_data->call_id)); action = services_alert_create(alert_id, alert_path, alert_timeout, params, alert_sequence_no, cb_data); @@ -165,9 +159,7 @@ process_lrmd_alert_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) err: if (cb_data) { - 
if (cb_data->client_id) { - free(cb_data->client_id); - } + free(cb_data->client_id); free(cb_data); } services_action_free(action); diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c index cf4503a..6b1ded1 100644 --- a/daemons/execd/execd_commands.c +++ b/daemons/execd/execd_commands.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include "pacemaker-execd.h" @@ -274,17 +274,17 @@ normalize_action_name(lrmd_rsc_t * rsc, const char *action) static lrmd_rsc_t * build_rsc_from_xml(xmlNode * msg) { - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, msg, LOG_ERR); lrmd_rsc_t *rsc = NULL; - rsc = calloc(1, sizeof(lrmd_rsc_t)); + rsc = pcmk__assert_alloc(1, sizeof(lrmd_rsc_t)); - crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts); + crm_element_value_int(msg, PCMK__XA_LRMD_CALLOPT, &rsc->call_opts); - rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); - rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS); - rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER); - rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE); + rsc->rsc_id = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); + rsc->class = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_CLASS); + rsc->provider = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_PROVIDER); + rsc->type = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_TYPE); rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, execute_resource_action, rsc); @@ -298,29 +298,33 @@ static lrmd_cmd_t * create_lrmd_cmd(xmlNode *msg, pcmk__client_t *client) { int call_options = 0; - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, msg, LOG_ERR); lrmd_cmd_t *cmd = NULL; - cmd = calloc(1, sizeof(lrmd_cmd_t)); + cmd = pcmk__assert_alloc(1, sizeof(lrmd_cmd_t)); - crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options); + crm_element_value_int(msg, PCMK__XA_LRMD_CALLOPT, &call_options); cmd->call_opts = call_options; - cmd->client_id = strdup(client->id); - - crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id); - crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval_ms); - crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout); - crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay); + cmd->client_id = pcmk__str_copy(client->id); + + crm_element_value_int(msg, PCMK__XA_LRMD_CALLID, &cmd->call_id); + crm_element_value_ms(rsc_xml, PCMK__XA_LRMD_RSC_INTERVAL, + &cmd->interval_ms); + crm_element_value_int(rsc_xml, PCMK__XA_LRMD_TIMEOUT, &cmd->timeout); + crm_element_value_int(rsc_xml, PCMK__XA_LRMD_RSC_START_DELAY, + &cmd->start_delay); cmd->timeout_orig = cmd->timeout; - cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN); - cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION); - cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR); - cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); + cmd->origin = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_ORIGIN); + cmd->action = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ACTION); + cmd->userdata_str = crm_element_value_copy(rsc_xml, + PCMK__XA_LRMD_RSC_USERDATA_STR); + cmd->rsc_id = 
crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); cmd->params = xml2list(rsc_xml); - if (pcmk__str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block", pcmk__str_casei)) { + if (pcmk__str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), + PCMK_VALUE_BLOCK, pcmk__str_casei)) { crm_debug("Setting flag to leave pid group on timeout and " "only kill action pid for " PCMK__OP_FMT, cmd->rsc_id, cmd->action, cmd->interval_ms); @@ -535,11 +539,11 @@ schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) static xmlNode * create_lrmd_reply(const char *origin, int rc, int call_id) { - xmlNode *reply = create_xml_node(NULL, T_LRMD_REPLY); + xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_LRMD_REPLY); - crm_xml_add(reply, F_LRMD_ORIGIN, origin); - crm_xml_add_int(reply, F_LRMD_RC, rc); - crm_xml_add_int(reply, F_LRMD_CALLID, call_id); + crm_xml_add(reply, PCMK__XA_LRMD_ORIGIN, origin); + crm_xml_add_int(reply, PCMK__XA_LRMD_RC, rc); + crm_xml_add_int(reply, PCMK__XA_LRMD_CALLID, call_id); return reply; } @@ -614,41 +618,44 @@ send_cmd_complete_notify(lrmd_cmd_t * cmd) cmd->last_notify_rc = cmd->result.exit_status; cmd->last_notify_op_status = cmd->result.execution_status; - notify = create_xml_node(NULL, T_LRMD_NOTIFY); + notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); - crm_xml_add(notify, F_LRMD_ORIGIN, __func__); - crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout); - crm_xml_add_ms(notify, F_LRMD_RSC_INTERVAL, cmd->interval_ms); - crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay); - crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->result.exit_status); - crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->result.execution_status); - crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id); - crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted); + crm_xml_add(notify, PCMK__XA_LRMD_ORIGIN, __func__); + crm_xml_add_int(notify, PCMK__XA_LRMD_TIMEOUT, cmd->timeout); + crm_xml_add_ms(notify, PCMK__XA_LRMD_RSC_INTERVAL, cmd->interval_ms); + crm_xml_add_int(notify, PCMK__XA_LRMD_RSC_START_DELAY, cmd->start_delay); + crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_RC, cmd->result.exit_status); + crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_OP_STATUS, + cmd->result.execution_status); + crm_xml_add_int(notify, PCMK__XA_LRMD_CALLID, cmd->call_id); + crm_xml_add_int(notify, PCMK__XA_LRMD_RSC_DELETED, cmd->rsc_deleted); - crm_xml_add_ll(notify, F_LRMD_RSC_RUN_TIME, + crm_xml_add_ll(notify, PCMK__XA_LRMD_RUN_TIME, (long long) cmd->epoch_last_run); - crm_xml_add_ll(notify, F_LRMD_RSC_RCCHANGE_TIME, + crm_xml_add_ll(notify, PCMK__XA_LRMD_RCCHANGE_TIME, (long long) cmd->epoch_rcchange); #ifdef PCMK__TIME_USE_CGT - crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time); - crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time); + crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_TIME, exec_time); + crm_xml_add_int(notify, PCMK__XA_LRMD_QUEUE_TIME, queue_time); #endif - crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC); - crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id); + crm_xml_add(notify, PCMK__XA_LRMD_OP, LRMD_OP_RSC_EXEC); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_ID, cmd->rsc_id); if(cmd->real_action) { - crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_ACTION, cmd->real_action); } else { - crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_ACTION, cmd->action); } - crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str); - crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, 
cmd->result.exit_reason); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_USERDATA_STR, cmd->userdata_str); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_EXIT_REASON, cmd->result.exit_reason); if (cmd->result.action_stderr != NULL) { - crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->result.action_stderr); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_OUTPUT, + cmd->result.action_stderr); } else if (cmd->result.action_stdout != NULL) { - crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->result.action_stdout); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_OUTPUT, + cmd->result.action_stdout); } if (cmd->params) { @@ -656,7 +663,7 @@ send_cmd_complete_notify(lrmd_cmd_t * cmd) char *value = NULL; GHashTableIter iter; - xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS); + xmlNode *args = pcmk__xe_create(notify, PCMK__XE_ATTRIBUTES); g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { @@ -684,18 +691,19 @@ send_generic_notify(int rc, xmlNode * request) if (pcmk__ipc_client_count() != 0) { int call_id = 0; xmlNode *notify = NULL; - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); - const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); - const char *op = crm_element_value(request, F_LRMD_OPERATION); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, request, + LOG_ERR); + const char *rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); + const char *op = crm_element_value(request, PCMK__XA_LRMD_OP); - crm_element_value_int(request, F_LRMD_CALLID, &call_id); + crm_element_value_int(request, PCMK__XA_LRMD_CALLID, &call_id); - notify = create_xml_node(NULL, T_LRMD_NOTIFY); - crm_xml_add(notify, F_LRMD_ORIGIN, __func__); - crm_xml_add_int(notify, F_LRMD_RC, rc); - crm_xml_add_int(notify, F_LRMD_CALLID, call_id); - crm_xml_add(notify, F_LRMD_OPERATION, op); - crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id); + notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); + crm_xml_add(notify, PCMK__XA_LRMD_ORIGIN, __func__); + crm_xml_add_int(notify, PCMK__XA_LRMD_RC, rc); + crm_xml_add_int(notify, PCMK__XA_LRMD_CALLID, call_id); + crm_xml_add(notify, PCMK__XA_LRMD_OP, op); + crm_xml_add(notify, PCMK__XA_LRMD_RSC_ID, rsc_id); pcmk__foreach_ipc_client(send_client_notify, notify); @@ -778,9 +786,9 @@ notify_of_new_client(pcmk__client_t *new_client) struct notify_new_client_data data; data.new_client = new_client; - data.notify = create_xml_node(NULL, T_LRMD_NOTIFY); - crm_xml_add(data.notify, F_LRMD_ORIGIN, __func__); - crm_xml_add(data.notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT); + data.notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); + crm_xml_add(data.notify, PCMK__XA_LRMD_ORIGIN, __func__); + crm_xml_add(data.notify, PCMK__XA_LRMD_OP, LRMD_OP_NEW_CLIENT); pcmk__foreach_ipc_client(notify_one_client, &data); free_xml(data.notify); } @@ -853,7 +861,7 @@ action_complete(svc_action_t * action) */ goagain = true; cmd->real_action = cmd->action; - cmd->action = strdup(PCMK_ACTION_MONITOR); + cmd->action = pcmk__str_copy(PCMK_ACTION_MONITOR); } else if (cmd->real_action != NULL) { // This is follow-up monitor to check whether start/stop completed @@ -1479,23 +1487,33 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, { int rc = pcmk_ok; time_t now = time(NULL); - const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); + const char *protocol_version = + crm_element_value(request, PCMK__XA_LRMD_PROTOCOL_VERSION); const char *start_state = 
pcmk__env_option(PCMK__ENV_NODE_START_STATE); - if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { + if (compare_version(protocol_version, LRMD_COMPATIBLE_PROTOCOL) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", - LRMD_MIN_PROTOCOL_VERSION, protocol_version); + LRMD_COMPATIBLE_PROTOCOL, protocol_version); rc = -EPROTO; } - if (pcmk__xe_attr_is_true(request, F_LRMD_IS_IPC_PROVIDER)) { + if (pcmk__xe_attr_is_true(request, PCMK__XA_LRMD_IS_IPC_PROVIDER)) { #ifdef PCMK__COMPILE_REMOTE if ((client->remote != NULL) && pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { + const char *op = crm_element_value(request, PCMK__XA_LRMD_OP); // This is a remote connection from a cluster node's controller ipc_proxy_add_provider(client); + + /* If this was a register operation, also ask for new schema files but + * only if it's supported by the protocol version. + */ + if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none) && + LRMD_SUPPORTS_SCHEMA_XFER(protocol_version)) { + remoted_request_cib_schema_files(); + } } else { rc = -EACCES; } @@ -1505,9 +1523,9 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, } *reply = create_lrmd_reply(__func__, rc, call_id); - crm_xml_add(*reply, F_LRMD_OPERATION, CRM_OP_REGISTER); - crm_xml_add(*reply, F_LRMD_CLIENTID, client->id); - crm_xml_add(*reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); + crm_xml_add(*reply, PCMK__XA_LRMD_OP, CRM_OP_REGISTER); + crm_xml_add(*reply, PCMK__XA_LRMD_CLIENTID, client->id); + crm_xml_add(*reply, PCMK__XA_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time); if (start_state) { @@ -1542,8 +1560,9 @@ static xmlNode * process_lrmd_get_rsc_info(xmlNode *request, int call_id) { int rc = pcmk_ok; - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); - const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, request, + LOG_ERR); + const char *rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); xmlNode *reply = NULL; lrmd_rsc_t *rsc = NULL; @@ -1559,10 +1578,10 @@ process_lrmd_get_rsc_info(xmlNode *request, int call_id) reply = create_lrmd_reply(__func__, rc, call_id); if (rsc) { - crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id); - crm_xml_add(reply, F_LRMD_CLASS, rsc->class); - crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider); - crm_xml_add(reply, F_LRMD_TYPE, rsc->type); + crm_xml_add(reply, PCMK__XA_LRMD_RSC_ID, rsc->rsc_id); + crm_xml_add(reply, PCMK__XA_LRMD_CLASS, rsc->class); + crm_xml_add(reply, PCMK__XA_LRMD_PROVIDER, rsc->provider); + crm_xml_add(reply, PCMK__XA_LRMD_TYPE, rsc->type); } return reply; } @@ -1573,8 +1592,9 @@ process_lrmd_rsc_unregister(pcmk__client_t *client, uint32_t id, { int rc = pcmk_ok; lrmd_rsc_t *rsc = NULL; - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); - const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, request, + LOG_ERR); + const char *rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); if (!rsc_id) { return -ENODEV; @@ -1604,8 +1624,9 @@ process_lrmd_rsc_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) { lrmd_rsc_t *rsc = NULL; lrmd_cmd_t *cmd = NULL; - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); - const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); + xmlNode *rsc_xml = get_xpath_object("//" 
PCMK__XE_LRMD_RSC, request, + LOG_ERR); + const char *rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); int call_id; if (!rsc_id) { @@ -1727,12 +1748,13 @@ cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id) static int process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request) { - xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); - const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); - const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION); + xmlNode *rsc_xml = get_xpath_object("//" PCMK__XE_LRMD_RSC, request, + LOG_ERR); + const char *rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); + const char *action = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ACTION); guint interval_ms = 0; - crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &interval_ms); + crm_element_value_ms(rsc_xml, PCMK__XA_LRMD_RSC_INTERVAL, &interval_ms); if (!rsc_id || !action) { return -EINVAL; @@ -1744,17 +1766,17 @@ process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request) static void add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc) { - xmlNode *rsc_xml = create_xml_node(reply, F_LRMD_RSC); + xmlNode *rsc_xml = pcmk__xe_create(reply, PCMK__XE_LRMD_RSC); - crm_xml_add(rsc_xml, F_LRMD_RSC_ID, rsc->rsc_id); + crm_xml_add(rsc_xml, PCMK__XA_LRMD_RSC_ID, rsc->rsc_id); for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) { lrmd_cmd_t *cmd = item->data; - xmlNode *op_xml = create_xml_node(rsc_xml, T_LRMD_RSC_OP); + xmlNode *op_xml = pcmk__xe_create(rsc_xml, PCMK__XE_LRMD_RSC_OP); - crm_xml_add(op_xml, F_LRMD_RSC_ACTION, - (cmd->real_action? cmd->real_action : cmd->action)); - crm_xml_add_ms(op_xml, F_LRMD_RSC_INTERVAL, cmd->interval_ms); - crm_xml_add_int(op_xml, F_LRMD_TIMEOUT, cmd->timeout_orig); + crm_xml_add(op_xml, PCMK__XA_LRMD_RSC_ACTION, + pcmk__s(cmd->real_action, cmd->action)); + crm_xml_add_ms(op_xml, PCMK__XA_LRMD_RSC_INTERVAL, cmd->interval_ms); + crm_xml_add_int(op_xml, PCMK__XA_LRMD_TIMEOUT, cmd->timeout_orig); } } @@ -1768,12 +1790,12 @@ process_lrmd_get_recurring(xmlNode *request, int call_id) xmlNode *rsc_xml = NULL; // Resource ID is optional - rsc_xml = first_named_child(request, F_LRMD_CALLDATA); + rsc_xml = pcmk__xe_first_child(request, PCMK__XE_LRMD_CALLDATA, NULL, NULL); if (rsc_xml) { - rsc_xml = first_named_child(rsc_xml, F_LRMD_RSC); + rsc_xml = pcmk__xe_first_child(rsc_xml, PCMK__XE_LRMD_RSC, NULL, NULL); } if (rsc_xml) { - rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); + rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID); } // If resource ID is specified, resource must exist @@ -1809,7 +1831,7 @@ process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; int call_id = 0; - const char *op = crm_element_value(request, F_LRMD_OPERATION); + const char *op = crm_element_value(request, PCMK__XA_LRMD_OP); int do_reply = 0; int do_notify = 0; xmlNode *reply = NULL; @@ -1821,7 +1843,7 @@ process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) bool allowed = pcmk_is_set(client->flags, pcmk__client_privileged); crm_trace("Processing %s operation from %s", op, client->id); - crm_element_value_int(request, F_LRMD_CALLID, &call_id); + crm_element_value_int(request, PCMK__XA_LRMD_CALLID, &call_id); if (pcmk__str_eq(op, CRM_OP_IPC_FWD, pcmk__str_none)) { #ifdef PCMK__COMPILE_REMOTE @@ -1882,10 +1904,16 @@ process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) do_reply = 1; } else if 
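/*
 * The process_lrmd_signon() hunk above requests new schema files only when
 * the client's advertised protocol version passes compare_version() and
 * LRMD_SUPPORTS_SCHEMA_XFER(). A minimal sketch of that kind of version
 * gate, with an illustrative "major.minor" comparison standing in for the
 * real helpers (the "1.2" floor below is made up):
 */
#include <stdio.h>

/* Compare two "major.minor" strings numerically, strcmp()-style */
static int
toy_compare_version(const char *a, const char *b)
{
    int a_maj = 0, a_min = 0, b_maj = 0, b_min = 0;

    sscanf(a, "%d.%d", &a_maj, &a_min);
    sscanf(b, "%d.%d", &b_maj, &b_min);
    if (a_maj != b_maj) {
        return (a_maj < b_maj)? -1 : 1;
    }
    return (a_min < b_min)? -1 : ((a_min > b_min)? 1 : 0);
}

/* Offer an optional capability only to peers that are new enough */
static int
peer_supports_feature(const char *peer_version)
{
    return toy_compare_version(peer_version, "1.2") >= 0;
}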
(pcmk__str_eq(op, LRMD_OP_CHECK, pcmk__str_none)) { if (allowed) { - xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA); + xmlNode *wrapper = pcmk__xe_first_child(request, + PCMK__XE_LRMD_CALLDATA, + NULL, NULL); + xmlNode *data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + + const char *timeout = NULL; CRM_LOG_ASSERT(data != NULL); - pcmk__valid_sbd_timeout(crm_element_value(data, F_LRMD_WATCHDOG)); + timeout = crm_element_value(data, PCMK__XA_LRMD_WATCHDOG); + pcmk__valid_stonith_watchdog_timeout(timeout); } else { rc = -EACCES; } diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c index e7e30eb..926e278 100644 --- a/daemons/execd/pacemaker-execd.c +++ b/daemons/execd/pacemaker-execd.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include #include @@ -89,9 +89,11 @@ get_stonith_connection(void) stonith_api_delete(stonith_api); stonith_api = NULL; } else { - stonith_api->cmds->register_notification(stonith_api, - T_STONITH_NOTIFY_DISCONNECT, - stonith_connection_destroy_cb); + stonith_api_operations_t *cmds = stonith_api->cmds; + + cmds->register_notification(stonith_api, + PCMK__VALUE_ST_NOTIFY_DISCONNECT, + stonith_connection_destroy_cb); } } return stonith_api; @@ -102,7 +104,7 @@ lrmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -141,12 +143,13 @@ lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) } if (!client->name) { - const char *value = crm_element_value(request, F_LRMD_CLIENTNAME); + const char *value = crm_element_value(request, + PCMK__XA_LRMD_CLIENTNAME); if (value == NULL) { client->name = pcmk__itoa(pcmk__client_pid(c)); } else { - client->name = strdup(value); + client->name = pcmk__str_copy(value); } } @@ -155,9 +158,9 @@ lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) lrmd_call_id = 1; } - crm_xml_add(request, F_LRMD_CLIENTID, client->id); - crm_xml_add(request, F_LRMD_CLIENTNAME, client->name); - crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id); + crm_xml_add(request, PCMK__XA_LRMD_CLIENTID, client->id); + crm_xml_add(request, PCMK__XA_LRMD_CLIENTNAME, client->name); + crm_xml_add_int(request, PCMK__XA_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); @@ -281,11 +284,7 @@ static gboolean lrmd_exit(gpointer data) { crm_info("Terminating with %d clients", pcmk__ipc_client_count()); - if (stonith_api) { - stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT); - stonith_api->cmds->disconnect(stonith_api); - stonith_api_delete(stonith_api); - } + stonith_api_delete(stonith_api); if (ipcs) { mainloop_del_ipc_server(ipcs); } @@ -443,19 +442,23 @@ main(int argc, char **argv, char **envp) GError *error = NULL; GOptionGroup *output_group = NULL; - pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); -#ifdef PCMK__COMPILE_REMOTE - gchar **processed_args = pcmk__cmdline_preproc(argv, "lp"); -#else - gchar **processed_args = pcmk__cmdline_preproc(argv, "l"); -#endif // PCMK__COMPILE_REMOTE - GOptionContext *context = build_arg_context(args, &output_group); + pcmk__common_args_t *args = NULL; + gchar **processed_args = NULL; + GOptionContext *context = NULL; #ifdef 
PCMK__COMPILE_REMOTE // If necessary, create PID 1 now before any file descriptors are opened remoted_spawn_pidone(argc, argv, envp); #endif + args = pcmk__new_common_args(SUMMARY); +#ifdef PCMK__COMPILE_REMOTE + processed_args = pcmk__cmdline_preproc(argv, "lp"); +#else + processed_args = pcmk__cmdline_preproc(argv, "l"); +#endif // PCMK__COMPILE_REMOTE + context = build_arg_context(args, &output_group); + crm_log_preinit(EXECD_NAME, argc, argv); pcmk__register_formats(output_group, formats); @@ -495,7 +498,7 @@ main(int argc, char **argv, char **envp) // ocf_log() (in resource-agents) uses the capitalized env options below option = pcmk__env_option(PCMK__ENV_LOGFACILITY); - if (!pcmk__str_eq(option, PCMK__VALUE_NONE, + if (!pcmk__str_eq(option, PCMK_VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches) && !pcmk__str_eq(option, "/dev/null", pcmk__str_none)) { @@ -503,7 +506,7 @@ main(int argc, char **argv, char **envp) } option = pcmk__env_option(PCMK__ENV_LOGFILE); - if (!pcmk__str_eq(option, PCMK__VALUE_NONE, + if (!pcmk__str_eq(option, PCMK_VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__set_env_option("LOGFILE", option, true); diff --git a/daemons/execd/pacemaker-execd.h b/daemons/execd/pacemaker-execd.h index 9c1d173..6fb8ef4 100644 --- a/daemons/execd/pacemaker-execd.h +++ b/daemons/execd/pacemaker-execd.h @@ -101,6 +101,7 @@ void ipc_proxy_forward_client(pcmk__client_t *client, xmlNode *xml); pcmk__client_t *ipc_proxy_get_provider(void); int ipc_proxy_shutdown_req(pcmk__client_t *ipc_proxy); void remoted_spawn_pidone(int argc, char **argv, char **envp); +void remoted_request_cib_schema_files(void); #endif int process_lrmd_alert_exec(pcmk__client_t *client, uint32_t id, diff --git a/daemons/execd/remoted_pidone.c b/daemons/execd/remoted_pidone.c index 08271bf..0a6c251 100644 --- a/daemons/execd/remoted_pidone.c +++ b/daemons/execd/remoted_pidone.c @@ -1,5 +1,5 @@ /* - * Copyright 2017-2023 the Pacemaker project contributors + * Copyright 2017-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -25,7 +25,7 @@ static pid_t main_pid = 0; static void sigdone(void) { - exit(CRM_EX_OK); + crm_exit(CRM_EX_OK); } static void @@ -44,9 +44,9 @@ sigreap(void) if (pid == main_pid) { /* Exit when pacemaker-remote exits and use the same return code */ if (WIFEXITED(status)) { - exit(WEXITSTATUS(status)); + crm_exit(WEXITSTATUS(status)); } - exit(CRM_EX_ERROR); + crm_exit(CRM_EX_ERROR); } } while (pid > 0); } @@ -203,7 +203,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp) * from /etc/pacemaker/pcmk-init.env, which could be useful for testing or * containers with a custom PID 1 script that launches pacemaker-remoted. */ - const char *pid1 = "default"; + const char *pid1 = PCMK_VALUE_DEFAULT; if (getpid() != 1) { pid1 = pcmk__env_option(PCMK__ENV_REMOTE_PID1); diff --git a/daemons/execd/remoted_proxy.c b/daemons/execd/remoted_proxy.c index 62c8c3a..40dfdc6 100644 --- a/daemons/execd/remoted_proxy.c +++ b/daemons/execd/remoted_proxy.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
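/*
 * The remoted_pidone.c hunk above keeps sigreap() reaping children while
 * pacemaker-remoted runs as PID 1, mirroring the main child's exit status.
 * A self-contained version of that reap loop, with plain exit() standing in
 * for Pacemaker's crm_exit():
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t main_pid; /* set after forking the real daemon */

static void
reap_children(void)
{
    int status = 0;
    pid_t pid;

    do {
        pid = waitpid(-1, &status, WNOHANG);
        if (pid == main_pid) {
            /* Propagate the main child's exit code, as sigreap() does */
            exit(WIFEXITED(status)? WEXITSTATUS(status) : EXIT_FAILURE);
        }
    } while (pid > 0);
}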
* @@ -14,7 +14,7 @@ #include "pacemaker-execd.h" #include -#include +#include #include #include #include @@ -81,12 +81,12 @@ ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc */ client = pcmk__new_client(c, uid, gid); if (client == NULL) { - return -EREMOTEIO; + return -ENOMEM; } /* This ipc client is bound to a single ipc provider. If the * provider goes away, this client is disconnected */ - client->userdata = strdup(ipc_proxy->id); + client->userdata = pcmk__str_copy(ipc_proxy->id); client->name = crm_strdup_printf("proxy-%s-%d-%.8s", ipc_channel, client->pid, client->id); /* Allow remote executor to distinguish between proxied local clients and @@ -96,10 +96,10 @@ ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc g_hash_table_insert(ipc_clients, client->id, client); - msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_NEW); - crm_xml_add(msg, F_LRMD_IPC_IPC_SERVER, ipc_channel); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); + msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_NEW); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SERVER, ipc_channel); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); crm_debug("Accepted IPC proxy connection (session ID %s) " @@ -117,7 +117,7 @@ crmd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) static int32_t attrd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { - return ipc_proxy_accept(c, uid, gid, T_ATTRD); + return ipc_proxy_accept(c, uid, gid, PCMK__VALUE_ATTRD); } static int32_t @@ -147,9 +147,13 @@ cib_proxy_accept_ro(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) void ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml) { - const char *session = crm_element_value(xml, F_LRMD_IPC_SESSION); - const char *msg_type = crm_element_value(xml, F_LRMD_IPC_OP); - xmlNode *msg = get_message_xml(xml, F_LRMD_IPC_MSG); + const char *session = crm_element_value(xml, PCMK__XA_LRMD_IPC_SESSION); + const char *msg_type = crm_element_value(xml, PCMK__XA_LRMD_IPC_OP); + + xmlNode *wrapper = pcmk__xe_first_child(xml, PCMK__XE_LRMD_IPC_MSG, NULL, + NULL); + xmlNode *msg = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + pcmk__client_t *ipc_client; int rc = pcmk_rc_ok; @@ -169,9 +173,9 @@ ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml) ipc_client = pcmk__find_client_by_id(session); if (ipc_client == NULL) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); - crm_xml_add(msg, F_LRMD_IPC_SESSION, session); + xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, session); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); return; @@ -198,7 +202,7 @@ ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml) } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_RESPONSE, pcmk__str_casei)) { int msg_id = 0; - crm_element_value_int(xml, F_LRMD_IPC_MSG_ID, &msg_id); + crm_element_value_int(xml, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id); crm_trace("Sending response to %d - %s", ipc_client->request_id, ipc_client->id); rc = pcmk__ipc_send_xml(ipc_client, msg_id, msg, FALSE); @@ -225,6 +229,7 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) uint32_t flags = 0; pcmk__client_t *client = 
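/*
 * ipc_proxy_forward_client() below replaces get_message_xml() with a
 * two-step lookup: find the wrapper element by name, then take its first
 * element child. The same pattern in plain libxml2; "lrmd_ipc_msg" is a
 * placeholder for the internal PCMK__XE_LRMD_IPC_MSG name, and the helper
 * is a simplified stand-in for pcmk__xe_first_child():
 */
#include <string.h>
#include <libxml/tree.h>

/* Return the first child element of parent, optionally matching a name */
static xmlNode *
first_child_by_name(xmlNode *parent, const char *name)
{
    if (parent == NULL) {
        return NULL;
    }
    for (xmlNode *child = xmlFirstElementChild(parent); child != NULL;
         child = xmlNextElementSibling(child)) {
        if ((name == NULL)
            || (strcmp((const char *) child->name, name) == 0)) {
            return child;
        }
    }
    return NULL;
}

/* Usage mirroring the hunk below: unwrap the proxied message */
static xmlNode *
unwrap_proxied_msg(xmlNode *xml)
{
    xmlNode *wrapper = first_child_by_name(xml, "lrmd_ipc_msg");

    return first_child_by_name(wrapper, NULL);
}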
pcmk__find_client(c); pcmk__client_t *ipc_proxy = pcmk__find_client_by_id(client->userdata); + xmlNode *wrapper = NULL; xmlNode *request = NULL; xmlNode *msg = NULL; @@ -263,18 +268,22 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) pcmk__set_ipc_flags(flags, pcmk__client_name(client), crm_ipc_proxied); client->request_id = id; - msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); - crm_xml_add(msg, F_LRMD_IPC_CLIENT, pcmk__client_name(client)); - crm_xml_add(msg, F_LRMD_IPC_USER, client->user); - crm_xml_add_int(msg, F_LRMD_IPC_MSG_ID, id); - crm_xml_add_int(msg, F_LRMD_IPC_MSG_FLAGS, flags); - add_message_xml(msg, F_LRMD_IPC_MSG, request); + msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_CLIENT, pcmk__client_name(client)); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_USER, client->user); + crm_xml_add_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, id); + crm_xml_add_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, flags); + + wrapper = pcmk__xe_create(msg, PCMK__XE_LRMD_IPC_MSG); + + pcmk__xml_copy(wrapper, request); + lrmd_server_send_notify(ipc_proxy, msg); + free_xml(request); free_xml(msg); - return 0; } @@ -289,15 +298,15 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) int ipc_proxy_shutdown_req(pcmk__client_t *ipc_proxy) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); + xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); int rc; - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ); /* We don't really have a session, but the controller needs this attribute * to recognize this as proxy communication. */ - crm_xml_add(msg, F_LRMD_IPC_SESSION, "0"); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, "0"); rc = (lrmd_server_send_notify(ipc_proxy, msg) != pcmk_rc_ok)? -1 : 0; free_xml(msg); @@ -319,9 +328,9 @@ ipc_proxy_closed(qb_ipcs_connection_t * c) crm_trace("Connection %p", c); if (ipc_proxy) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); + xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); + crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); } diff --git a/daemons/execd/remoted_schemas.c b/daemons/execd/remoted_schemas.c new file mode 100644 index 0000000..f0ec068 --- /dev/null +++ b/daemons/execd/remoted_schemas.c @@ -0,0 +1,286 @@ +/* + * Copyright 2023-2024 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "pacemaker-execd.h" + +static pid_t schema_fetch_pid = 0; + +static int +rm_files(const char *pathname, const struct stat *sbuf, int type, struct FTW *ftwb) +{ + /* Don't delete PCMK__REMOTE_SCHEMA_DIR . 
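/*
 * The ipc_proxy_dispatch() hunk above drops add_message_xml() in favor of
 * creating a wrapper child and copying the request under it. The same
 * wrap-and-copy shape in plain libxml2, again with a literal placeholder
 * for the internal element name:
 */
#include <libxml/tree.h>

static xmlNode *
wrap_request(xmlNode *msg, xmlNode *request)
{
    xmlNode *wrapper = xmlNewChild(msg, NULL, BAD_CAST "lrmd_ipc_msg", NULL);
    xmlNode *copy = NULL;

    if (wrapper == NULL) {
        return NULL;
    }
    copy = xmlCopyNode(request, 1 /* recursive: keep children too */);
    if (copy == NULL) {
        return NULL;
    }
    return xmlAddChild(wrapper, copy);
}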
*/ + if (ftwb->level == 0) { + return 0; + } + + if (remove(pathname) != 0) { + int rc = errno; + crm_err("Could not remove %s: %s", pathname, pcmk_rc_str(rc)); + return -1; + } + + return 0; +} + +static void +clean_up_extra_schema_files(void) +{ + const char *remote_schema_dir = pcmk__remote_schema_dir(); + struct stat sb; + int rc; + + rc = stat(remote_schema_dir, &sb); + + if (rc == -1) { + if (errno == ENOENT) { + /* If the directory doesn't exist, try to make it first. */ + if (mkdir(remote_schema_dir, 0755) != 0) { + rc = errno; + crm_err("Could not create directory for schemas: %s", + pcmk_rc_str(rc)); + } + + } else { + rc = errno; + crm_err("Could not create directory for schemas: %s", + pcmk_rc_str(rc)); + } + + } else if (!S_ISDIR(sb.st_mode)) { + /* If something exists with the same name that's not a directory, that's + * an error. + */ + crm_err("%s already exists but is not a directory", remote_schema_dir); + + } else { + /* It's a directory - clear it out so we can download potentially new + * schema files. + */ + rc = nftw(remote_schema_dir, rm_files, 10, FTW_DEPTH|FTW_MOUNT|FTW_PHYS); + + if (rc != 0) { + crm_err("Could not remove %s: %s", remote_schema_dir, pcmk_rc_str(rc)); + } + } +} + +static void +write_extra_schema_file(xmlNode *xml, void *user_data) +{ + const char *remote_schema_dir = pcmk__remote_schema_dir(); + const char *file = NULL; + char *path = NULL; + int rc; + + file = crm_element_value(xml, PCMK_XA_PATH); + if (file == NULL) { + crm_warn("No destination path given in schema request"); + return; + } + + path = crm_strdup_printf("%s/%s", remote_schema_dir, file); + + /* The schema is a CDATA node, which is a child of the node. Traverse + * all children and look for the first CDATA child. There can't be more than + * one because we only have one file attribute on the parent. + */ + for (xmlNode *child = xml->children; child != NULL; child = child->next) { + FILE *stream = NULL; + + if (child->type != XML_CDATA_SECTION_NODE) { + continue; + } + + stream = fopen(path, "w+"); + if (stream == NULL) { + crm_warn("Could not write schema file %s: %s", path, strerror(errno)); + } else { + rc = fprintf(stream, "%s", child->content); + + if (rc < 0) { + crm_warn("Could not write schema file %s: %s", path, strerror(errno)); + } + + fclose(stream); + } + + break; + } + + free(path); +} + +static void +get_schema_files(void) +{ + int rc = pcmk_rc_ok; + cib_t *cib = NULL; + xmlNode *reply; + + cib = cib_new(); + if (cib == NULL) { + _exit(ENOTCONN); + } + + rc = cib->cmds->signon(cib, crm_system_name, cib_query); + if (rc != pcmk_ok) { + crm_err("Could not connect to the CIB manager: %s", pcmk_strerror(rc)); + _exit(pcmk_rc2exitc(rc)); + } + + rc = cib->cmds->fetch_schemas(cib, &reply, pcmk__highest_schema_name(), + cib_sync_call); + if (rc != pcmk_ok) { + crm_err("Could not get schema files: %s", pcmk_strerror(rc)); + rc = pcmk_legacy2rc(rc); + + } else if (reply->children != NULL) { + /* The returned document looks something like this: + * + * + * + * + * + * + * + * + * ... + * + * + * + * ... + * + * + * + * + * All the and tags are really just there for organizing + * the XML a little better. What we really care about are the nodes, + * and specifically the path attributes and the CDATA children (not shown) + * of each. We can use an xpath query to reach down and get all the + * nodes at once. 
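/*
 * rm_files() and clean_up_extra_schema_files() above empty the remote
 * schema directory with nftw(). A standalone version of the same walk:
 * FTW_DEPTH visits a directory's contents before the directory itself,
 * which is what lets remove() succeed on non-empty trees, and level 0 is
 * skipped so the top-level directory survives.
 */
#define _XOPEN_SOURCE 500 /* for nftw() */
#include <ftw.h>
#include <stdio.h>

static int
rm_entry(const char *path, const struct stat *sb, int type, struct FTW *ftwb)
{
    (void) sb;
    (void) type;

    if (ftwb->level == 0) {
        return 0; /* keep the directory being walked */
    }
    if (remove(path) != 0) {
        perror(path);
        return -1; /* nonzero stops the walk */
    }
    return 0;
}

static int
empty_directory(const char *dir)
{
    /* 10 matches the open-directory limit used in the call above */
    return nftw(dir, rm_entry, 10, FTW_DEPTH|FTW_MOUNT|FTW_PHYS);
}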
+ * + * If we already have the latest schema version, or we asked for one later + * than what the cluster supports, we'll get back an empty node, + * so all this will continue to work. It just won't do anything. + */ + crm_foreach_xpath_result(reply, "//" PCMK_XA_FILE, + write_extra_schema_file, NULL); + } + + free_xml(reply); + cib__clean_up_connection(&cib); + _exit(pcmk_rc2exitc(rc)); +} + +/* Load any additional schema files when the child is finished fetching and + * saving them to disk. + */ +static void +get_schema_files_complete(mainloop_child_t *p, pid_t pid, int core, int signo, int exitcode) +{ + const char *errmsg = "Could not load additional schema files"; + + if ((signo == 0) && (exitcode == 0)) { + const char *remote_schema_dir = pcmk__remote_schema_dir(); + + /* Don't just crm_schema_init here because that will load the base + * schemas again too. Instead just load the things we fetched. + */ + pcmk__load_schemas_from_dir(remote_schema_dir); + pcmk__sort_schemas(); + crm_info("Fetching extra schema files completed successfully"); + + } else { + if (signo == 0) { + crm_err("%s: process %d exited %d", errmsg, (int) pid, exitcode); + + } else { + crm_err("%s: process %d terminated with signal %d (%s)%s", + errmsg, (int) pid, signo, strsignal(signo), + (core? " and dumped core" : "")); + } + + /* Clean up any incomplete schema data we might have been downloading when + * the process timed out or crashed. We don't need to do any extra cleanup + * because we never loaded the extra schemas, and we don't need to call + * crm_schema_init because that was called in remoted_request_cib_schema_files + * before this function. + */ + clean_up_extra_schema_files(); + } +} + +void +remoted_request_cib_schema_files(void) +{ + pid_t pid; + int rc; + + /* If a previous schema-fetch process is still running when we're called + * again, it's hung. Attempt to kill it before cleaning up the extra + * directory. + */ + if (schema_fetch_pid != 0) { + if (mainloop_child_kill(schema_fetch_pid) == FALSE) { + crm_warn("Unable to kill pre-existing schema-fetch process"); + return; + } + + schema_fetch_pid = 0; + } + + /* Clean up any extra schema files we downloaded from a previous cluster + * connection. After the files are gone, we need to wipe them from + * known_schemas, but there's no opposite operation for add_schema(). + * + * Instead, unload all the schemas. This means we'll also forget about all + * installed schemas as well, which means that pcmk__highest_schema_name() + * would fail. So we need to load the base schemas right now. + */ + clean_up_extra_schema_files(); + crm_schema_cleanup(); + crm_schema_init(); + + crm_info("Fetching extra schema files from cluster"); + pid = fork(); + + switch (pid) { + case -1: { + rc = errno; + crm_warn("Could not spawn process to get schema files: %s", pcmk_rc_str(rc)); + break; + } + + case 0: + /* child */ + get_schema_files(); + break; + + default: + /* parent */ + schema_fetch_pid = pid; + mainloop_child_add_with_flags(pid, 5 * 60 * 1000, "schema-fetch", NULL, + mainloop_leave_pid_group, + get_schema_files_complete); + break; + } +} diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c index 23a2dcf..e98991f 100644 --- a/daemons/execd/remoted_tls.c +++ b/daemons/execd/remoted_tls.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 the Pacemaker project contributors + * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
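/*
 * remoted_request_cib_schema_files() above forks a worker that fetches the
 * schema files and _exit()s, while the parent records the PID and lets
 * mainloop_child_add_with_flags() watch it with a five-minute timeout. The
 * bare parent/child split looks like this (the fetch body is elided):
 */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t worker_pid; /* nonzero while a fetch is in flight */

static void
do_fetch(void)
{
    /* ... connect, download, write files to disk ... */
    _exit(0); /* the child must never return into the parent's event loop */
}

static void
start_fetch(void)
{
    pid_t pid = fork();

    switch (pid) {
        case -1:
            perror("fork");
            break;
        case 0: /* child */
            do_fetch();
            break;
        default: /* parent: the real code also registers the PID with the
                  * main loop so a hung child can be killed and reaped
                  */
            worker_pid = pid;
            break;
    }
}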
* @@ -13,10 +13,8 @@ #include #include -#include -#include -#include #include +#include #include #include @@ -111,14 +109,11 @@ lrmd_remote_client_msg(gpointer data) request = pcmk__remote_message_xml(client->remote); while (request) { - crm_element_value_int(request, F_LRMD_REMOTE_MSG_ID, &id); + crm_element_value_int(request, PCMK__XA_LRMD_REMOTE_MSG_ID, &id); crm_trace("Processing remote client request %d", id); if (!client->name) { - const char *value = crm_element_value(request, F_LRMD_CLIENTNAME); - - if (value) { - client->name = strdup(value); - } + client->name = crm_element_value_copy(request, + PCMK__XA_LRMD_CLIENTNAME); } lrmd_call_id++; @@ -126,9 +121,9 @@ lrmd_remote_client_msg(gpointer data) lrmd_call_id = 1; } - crm_xml_add(request, F_LRMD_CLIENTID, client->id); - crm_xml_add(request, F_LRMD_CLIENTNAME, client->name); - crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id); + crm_xml_add(request, PCMK__XA_LRMD_CLIENTID, client->id); + crm_xml_add(request, PCMK__XA_LRMD_CLIENTNAME, client->name); + crm_xml_add_int(request, PCMK__XA_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); free_xml(request); @@ -175,6 +170,7 @@ lrmd_remote_client_destroy(gpointer user_data) gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*client->remote->tls_session); gnutls_free(client->remote->tls_session); + client->remote->tls_session = NULL; close(csock); } @@ -229,7 +225,7 @@ lrmd_remote_listen(gpointer data) } new_client = pcmk__new_unauth_client(NULL); - new_client->remote = calloc(1, sizeof(pcmk__remote_t)); + new_client->remote = pcmk__assert_alloc(1, sizeof(pcmk__remote_t)); pcmk__set_client_flags(new_client, pcmk__client_tls); new_client->remote->tls_session = session; diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c index 07bd500..edde8ca 100644 --- a/daemons/fenced/cts-fence-helper.c +++ b/daemons/fenced/cts-fence-helper.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
@@ -20,7 +20,6 @@ #include #include -#include #include #include @@ -164,8 +163,10 @@ passive_test(void) stonith_api_delete(st); crm_exit(CRM_EX_DISCONNECT); } - st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); - st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); + st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_DISCONNECT, + st_callback); + st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_FENCE, + st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback", @@ -325,8 +326,10 @@ sanity_tests(void) stonith_api_delete(st); crm_exit(CRM_EX_DISCONNECT); } - st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); - st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); + st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_DISCONNECT, + st_callback); + st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_FENCE, + st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback", diff --git a/daemons/fenced/fenced_cib.c b/daemons/fenced/fenced_cib.c index e11bf68..6bf0e6f 100644 --- a/daemons/fenced/fenced_cib.c +++ b/daemons/fenced/fenced_cib.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -15,7 +15,6 @@ #include #include -#include #include #include @@ -55,11 +54,12 @@ node_has_attr(const char *node, const char *name, const char *value) */ xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, - "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE - "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS - "/" XML_CIB_TAG_NVPAIR - "[@" XML_NVPAIR_ATTR_NAME "='", name, "' " - "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL); + "//" PCMK_XE_NODES "/" PCMK_XE_NODE + "[@" PCMK_XA_UNAME "='", node, "']" + "/" PCMK_XE_INSTANCE_ATTRIBUTES + "/" PCMK_XE_NVPAIR + "[@" PCMK_XA_NAME "='", name, "' " + "and @" PCMK_XA_VALUE "='", value, "']", NULL); match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER); @@ -76,7 +76,7 @@ add_topology_level(xmlNode *match) CRM_CHECK(match != NULL, return); fenced_register_level(match, &desc, &result); - fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc); + fenced_send_config_notification(STONITH_OP_LEVEL_ADD, &result, desc); pcmk__reset_result(&result); free(desc); } @@ -86,14 +86,14 @@ topology_remove_helper(const char *node, int level) { char *desc = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; - xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); + xmlNode *data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); - crm_xml_add(data, F_STONITH_ORIGIN, __func__); - crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); - crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); + crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); + crm_xml_add_int(data, PCMK_XA_INDEX, level); + crm_xml_add(data, PCMK_XA_TARGET, node); fenced_unregister_level(data, &desc, &result); - fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc); + 
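/*
 * node_has_attr() above assembles an XPath expression with pcmk__g_strcat()
 * to find a node attribute nvpair in the CIB. The same assembly with plain
 * GLib, tag and attribute names written out literally. Note the implicit
 * assumption shared with the code above: values are spliced in without
 * escaping, so a value containing a single quote would break the query.
 */
#include <glib.h>

/* Caller frees the returned string with g_free() */
static char *
build_node_attr_xpath(const char *node, const char *name, const char *value)
{
    GString *xpath = g_string_sized_new(256);

    g_string_append(xpath, "//nodes/node[@uname='");
    g_string_append(xpath, node);
    g_string_append(xpath, "']/instance_attributes/nvpair[@name='");
    g_string_append(xpath, name);
    g_string_append(xpath, "' and @value='");
    g_string_append(xpath, value);
    g_string_append(xpath, "']");

    return g_string_free(xpath, FALSE); /* FALSE keeps the character data */
}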
fenced_send_config_notification(STONITH_OP_LEVEL_DEL, &result, desc); pcmk__reset_result(&result); free_xml(data); free(desc); @@ -108,7 +108,7 @@ remove_topology_level(xmlNode *match) CRM_CHECK(match != NULL, return); key = stonith_level_key(match, fenced_target_by_unknown); - crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); + crm_element_value_int(match, PCMK_XA_INDEX, &index); topology_remove_helper(key, index); free(key); } @@ -149,7 +149,7 @@ void fencing_topology_init(void) { xmlXPathObjectPtr xpathObj = NULL; - const char *xpath = "//" XML_TAG_FENCING_LEVEL; + const char *xpath = "//" PCMK_XE_FENCING_LEVEL; crm_trace("Full topology refresh"); free_topology_list(); @@ -174,37 +174,41 @@ remove_cib_device(xmlXPathObjectPtr xpathObj) CRM_LOG_ASSERT(match != NULL); if(match != NULL) { - standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); + standard = crm_element_value(match, PCMK_XA_CLASS); } if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { continue; } - rsc_id = crm_element_value(match, XML_ATTR_ID); + rsc_id = crm_element_value(match, PCMK_XA_ID); stonith_device_remove(rsc_id, true); } } +#define XPATH_WATCHDOG_TIMEOUT "//" PCMK_XE_NVPAIR \ + "[@" PCMK_XA_NAME "='" \ + PCMK_OPT_STONITH_WATCHDOG_TIMEOUT "']" + static void update_stonith_watchdog_timeout_ms(xmlNode *cib) { - long timeout_ms = 0; + long long timeout_ms = 0; xmlNode *stonith_watchdog_xml = NULL; const char *value = NULL; - stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", - cib, LOG_NEVER); + stonith_watchdog_xml = get_xpath_object(XPATH_WATCHDOG_TIMEOUT, cib, + LOG_NEVER); if (stonith_watchdog_xml) { - value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); + value = crm_element_value(stonith_watchdog_xml, PCMK_XA_VALUE); } if (value) { timeout_ms = crm_get_msec(value); } if (timeout_ms < 0) { - timeout_ms = pcmk__auto_watchdog_timeout(); + timeout_ms = pcmk__auto_stonith_watchdog_timeout(); } stonith_watchdog_timeout_ms = timeout_ms; @@ -221,9 +225,9 @@ cib_devices_update(void) stonith_device_t *device = NULL; crm_info("Updating devices to version %s.%s.%s", - crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), - crm_element_value(local_cib, XML_ATTR_GENERATION), - crm_element_value(local_cib, XML_ATTR_NUMUPDATES)); + crm_element_value(local_cib, PCMK_XA_ADMIN_EPOCH), + crm_element_value(local_cib, PCMK_XA_EPOCH), + crm_element_value(local_cib, PCMK_XA_NUM_UPDATES)); g_hash_table_iter_init(&iter, device_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { @@ -256,7 +260,9 @@ update_cib_stonith_devices_v1(const char *event, xmlNode * msg) xmlXPathObjectPtr xpath_obj = NULL; /* process new constraints */ - xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION); + xpath_obj = xpath_search(msg, + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK_XE_RSC_LOCATION); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; @@ -273,14 +279,20 @@ update_cib_stonith_devices_v1(const char *event, xmlNode * msg) freeXpathObject(xpath_obj); /* process deletions */ - xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE); + xpath_obj = xpath_search(msg, + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_REMOVED + "//" PCMK_XE_PRIMITIVE); if (numXpathResults(xpath_obj) > 0) { remove_cib_device(xpath_obj); } freeXpathObject(xpath_obj); /* process additions */ - xpath_obj = xpath_search(msg, "//" 
F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE); + xpath_obj = xpath_search(msg, + "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_ADDED + "//" PCMK_XE_PRIMITIVE); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; @@ -289,8 +301,8 @@ update_cib_stonith_devices_v1(const char *event, xmlNode * msg) const char *standard = NULL; xmlNode *match = getXpathResult(xpath_obj, lpc); - rsc_id = crm_element_value(match, XML_ATTR_ID); - standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); + rsc_id = crm_element_value(match, PCMK_XA_ID); + standard = crm_element_value(match, PCMK_XA_CLASS); if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { continue; @@ -314,35 +326,39 @@ update_cib_stonith_devices_v2(const char *event, xmlNode * msg) { xmlNode *change = NULL; char *reason = NULL; - bool needs_update = FALSE; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, + NULL, NULL); + xmlNode *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + + for (change = pcmk__xe_first_child(patchset, NULL, NULL, NULL); + change != NULL; change = pcmk__xe_next(change)) { - for (change = pcmk__xml_first_child(patchset); change != NULL; - change = pcmk__xml_next(change)) { - const char *op = crm_element_value(change, XML_DIFF_OP); - const char *xpath = crm_element_value(change, XML_DIFF_PATH); + const char *op = crm_element_value(change, PCMK_XA_OPERATION); + const char *xpath = crm_element_value(change, PCMK_XA_PATH); const char *shortpath = NULL; - if ((op == NULL) || - (strcmp(op, "move") == 0) || - strstr(xpath, "/"XML_CIB_TAG_STATUS)) { + if (pcmk__str_eq(op, PCMK_VALUE_MOVE, pcmk__str_null_matches) + || (strstr(xpath, "/" PCMK_XE_STATUS) != NULL)) { continue; - } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) { + } + + if (pcmk__str_eq(op, PCMK_VALUE_DELETE, pcmk__str_none) + && (strstr(xpath, "/" PCMK_XE_PRIMITIVE) != NULL)) { const char *rsc_id = NULL; char *search = NULL; char *mutable = NULL; - if (strstr(xpath, XML_TAG_ATTR_SETS) || - strstr(xpath, XML_TAG_META_SETS)) { - needs_update = TRUE; - pcmk__str_update(&reason, - "(meta) attribute deleted from resource"); + if ((strstr(xpath, PCMK_XE_INSTANCE_ATTRIBUTES) != NULL) + || (strstr(xpath, PCMK_XE_META_ATTRIBUTES) != NULL)) { + + reason = pcmk__str_copy("(meta) attribute deleted from " + "resource"); break; } - pcmk__str_update(&mutable, xpath); - rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'"); + mutable = pcmk__str_copy(xpath); + rsc_id = strstr(mutable, PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "=\'"); if (rsc_id != NULL) { - rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'"); + rsc_id += strlen(PCMK_XE_PRIMITIVE "[@" PCMK_XA_ID "=\'"); search = strchr(rsc_id, '\''); } if (search != NULL) { @@ -355,30 +371,31 @@ update_cib_stonith_devices_v2(const char *event, xmlNode * msg) } free(mutable); - } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) || - strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) || - strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) { + } else if (strstr(xpath, "/" PCMK_XE_RESOURCES) + || strstr(xpath, "/" PCMK_XE_CONSTRAINTS) + || strstr(xpath, "/" PCMK_XE_RSC_DEFAULTS)) { shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath); reason = crm_strdup_printf("%s %s", op, shortpath+1); - needs_update = TRUE; break; } } - if(needs_update) { + if (reason != NULL) { crm_info("Updating device list from CIB: %s", reason); 
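/*
 * update_cib_stonith_devices_v2() above walks the entries of a v2 CIB
 * patchset and keys off each change's operation and path. The skeleton of
 * that loop in plain libxml2; "operation", "path", and "/status" are
 * written literally here where the real code uses PCMK_XA_OPERATION,
 * PCMK_XA_PATH, and PCMK_XE_STATUS:
 */
#include <string.h>
#include <libxml/tree.h>

static void
scan_patchset(xmlNode *patchset)
{
    for (xmlNode *change = xmlFirstElementChild(patchset); change != NULL;
         change = xmlNextElementSibling(change)) {

        xmlChar *op = xmlGetProp(change, BAD_CAST "operation");
        xmlChar *path = xmlGetProp(change, BAD_CAST "path");

        /* Moves and status-section changes are ignored, as above */
        if ((op != NULL) && (xmlStrcmp(op, BAD_CAST "move") != 0)
            && ((path == NULL)
                || (strstr((const char *) path, "/status") == NULL))) {
            /* ... inspect op and path, decide whether to refresh ... */
        }
        if (op != NULL) {
            xmlFree(op);
        }
        if (path != NULL) {
            xmlFree(path);
        }
    }
}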
cib_devices_update(); + free(reason); } else { crm_trace("No updates for device list found in CIB"); } - free(reason); } static void update_cib_stonith_devices(const char *event, xmlNode * msg) { int format = 1; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, + NULL, NULL); + xmlNode *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_ASSERT(patchset); crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); @@ -465,17 +482,19 @@ remove_fencing_topology(xmlXPathObjectPtr xpathObj) xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); - if (match && crm_element_value(match, XML_DIFF_MARKER)) { + if (match && crm_element_value(match, PCMK__XA_CRM_DIFF_MARKER)) { /* Deletion */ int index = 0; char *target = stonith_level_key(match, fenced_target_by_unknown); - crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); + crm_element_value_int(match, PCMK_XA_INDEX, &index); if (target == NULL) { - crm_err("Invalid fencing target in element %s", ID(match)); + crm_err("Invalid fencing target in element %s", + pcmk__xe_id(match)); } else if (index <= 0) { - crm_err("Invalid level for %s in element %s", target, ID(match)); + crm_err("Invalid level for %s in element %s", + target, pcmk__xe_id(match)); } else { topology_remove_helper(target, index); @@ -491,21 +510,27 @@ update_fencing_topology(const char *event, xmlNode * msg) int format = 1; const char *xpath; xmlXPathObjectPtr xpathObj = NULL; - xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, + NULL, NULL); + xmlNode *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_ASSERT(patchset); crm_element_value_int(patchset, PCMK_XA_FORMAT, &format); if(format == 1) { /* Process deletions (only) */ - xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; + xpath = "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_REMOVED + "//" PCMK_XE_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); remove_fencing_topology(xpathObj); freeXpathObject(xpathObj); /* Process additions and changes */ - xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; + xpath = "//" PCMK__XE_CIB_UPDATE_RESULT + "//" PCMK__XE_DIFF_ADDED + "//" PCMK_XE_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); register_fencing_topology(xpathObj); @@ -518,33 +543,36 @@ update_fencing_topology(const char *event, xmlNode * msg) xml_patch_versions(patchset, add, del); - for (change = pcmk__xml_first_child(patchset); change != NULL; - change = pcmk__xml_next(change)) { - const char *op = crm_element_value(change, XML_DIFF_OP); - const char *xpath = crm_element_value(change, XML_DIFF_PATH); + for (change = pcmk__xe_first_child(patchset, NULL, NULL, NULL); + change != NULL; change = pcmk__xe_next(change)) { + + const char *op = crm_element_value(change, PCMK_XA_OPERATION); + const char *xpath = crm_element_value(change, PCMK_XA_PATH); if(op == NULL) { continue; - } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) { + } else if(strstr(xpath, "/" PCMK_XE_FENCING_LEVEL) != NULL) { /* Change to a specific entry */ crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); - if(strcmp(op, "move") == 0) { + if (strcmp(op, PCMK_VALUE_MOVE) == 0) { continue; - } else if(strcmp(op, "create") == 0) { + } else if (strcmp(op, PCMK_VALUE_CREATE) == 0) { 
add_topology_level(change->children); - } else if(strcmp(op, "modify") == 0) { - xmlNode *match = first_named_child(change, XML_DIFF_RESULT); + } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { + xmlNode *match = pcmk__xe_first_child(change, + PCMK_XE_CHANGE_RESULT, + NULL, NULL); if(match) { remove_topology_level(match->children); add_topology_level(match->children); } - } else if(strcmp(op, "delete") == 0) { + } else if (strcmp(op, PCMK_VALUE_DELETE) == 0) { /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */ crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); @@ -552,20 +580,23 @@ update_fencing_topology(const char *event, xmlNode * msg) return; } - } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) { + } else if (strstr(xpath, "/" PCMK_XE_FENCING_TOPOLOGY) != NULL) { /* Change to the topology in general */ crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; - } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { + } else if (strstr(xpath, "/" PCMK_XE_CONFIGURATION)) { /* Changes to the whole config section, possibly including the topology as a whild */ - if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) { + if (pcmk__xe_first_child(change, PCMK_XE_FENCING_TOPOLOGY, NULL, + NULL) == NULL) { crm_trace("Nothing for us in %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); - } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { + } else if (pcmk__str_any_of(op, + PCMK_VALUE_DELETE, + PCMK_VALUE_CREATE, NULL)) { crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); fencing_topology_init(); @@ -586,7 +617,7 @@ update_fencing_topology(const char *event, xmlNode * msg) static void update_cib_cache_cb(const char *event, xmlNode * msg) { - long timeout_ms_saved = stonith_watchdog_timeout_ms; + long long timeout_ms_saved = stonith_watchdog_timeout_ms; bool need_full_refresh = false; if(!have_cib_devices) { @@ -603,14 +634,18 @@ update_cib_cache_cb(const char *event, xmlNode * msg) */ if (local_cib != NULL) { int rc = pcmk_ok; + xmlNode *wrapper = NULL; xmlNode *patchset = NULL; - crm_element_value_int(msg, F_CIB_RC, &rc); + crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc); if (rc != pcmk_ok) { return; } - patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); + wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, + NULL); + patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); + rc = xml_apply_patchset(local_cib, patchset, TRUE); switch (rc) { case pcmk_ok: @@ -660,7 +695,7 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us { crm_info("Updating device list from CIB"); have_cib_devices = TRUE; - local_cib = copy_xml(output); + local_cib = pcmk__xml_copy(NULL, output); pcmk__refresh_node_caches_from_cib(local_cib); update_stonith_watchdog_timeout_ms(local_cib); @@ -693,7 +728,7 @@ void fenced_cib_cleanup(void) { if (cib_api != NULL) { - cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, + cib_api->cmds->del_notify_callback(cib_api, PCMK__VALUE_CIB_DIFF_NOTIFY, update_cib_cache_cb); cib__clean_up_connection(&cib_api); } @@ -719,16 +754,20 @@ setup_cib(void) if (rc != pcmk_ok) { crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); + return; + } - } else if 
(pcmk_ok != - cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) { + rc = cib_api->cmds->add_notify_callback(cib_api, + PCMK__VALUE_CIB_DIFF_NOTIFY, + update_cib_cache_cb); + if (rc != pcmk_ok) { crm_err("Could not set CIB notification callback"); - - } else { - rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); - cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", - init_cib_cache_cb); - cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); - crm_info("Watching for fencing topology changes"); + return; } + + rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); + cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, + "init_cib_cache_cb", init_cib_cache_cb); + cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); + crm_info("Watching for fencing topology changes"); } diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index 7a62ed6..223a701 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -129,7 +128,7 @@ static int get_action_delay_max(const stonith_device_t *device, const char *action) { const char *value = NULL; - int delay_max = 0; + guint delay_max = 0U; if (!pcmk__is_fencing_action(action)) { return 0; @@ -137,10 +136,11 @@ get_action_delay_max(const stonith_device_t *device, const char *action) value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX); if (value) { - delay_max = crm_parse_interval_spec(value) / 1000; + pcmk_parse_interval_spec(value, &delay_max); + delay_max /= 1000; } - return delay_max; + return (int) delay_max; } static int @@ -148,7 +148,7 @@ get_action_delay_base(const stonith_device_t *device, const char *action, const char *target) { char *hash_value = NULL; - int delay_base = 0; + guint delay_base = 0U; if (!pcmk__is_fencing_action(action)) { return 0; @@ -157,11 +157,9 @@ get_action_delay_base(const stonith_device_t *device, const char *action, hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE); if (hash_value) { - char *value = strdup(hash_value); + char *value = pcmk__str_copy(hash_value); char *valptr = value; - CRM_ASSERT(value != NULL); - if (target != NULL) { for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) { char *mapval = strchr(val, ':'); @@ -181,13 +179,14 @@ get_action_delay_base(const stonith_device_t *device, const char *action, } if (strchr(value, ':') == 0) { - delay_base = crm_parse_interval_spec(value) / 1000; + pcmk_parse_interval_spec(value, &delay_base); + delay_base /= 1000; } free(valptr); } - return delay_base; + return (int) delay_base; } /*! 
@@ -232,7 +231,8 @@ get_action_timeout(const stonith_device_t *device, const char *action, snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (value) { - return atoi(value); + long long timeout_ms = crm_get_msec(value); + return (int) QB_MIN(timeout_ms / 1000, INT_MAX); } } return default_timeout; @@ -345,34 +345,33 @@ create_async_command(xmlNode *msg) return NULL; } - op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR); + op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg, LOG_ERR); if (op == NULL) { return NULL; } - cmd = calloc(1, sizeof(async_command_t)); - CRM_ASSERT(cmd != NULL); + cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); // All messages must include these - cmd->action = crm_element_value_copy(op, F_STONITH_ACTION); - cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION); - cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID); + cmd->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION); + cmd->op = crm_element_value_copy(msg, PCMK__XA_ST_OP); + cmd->client = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTID); if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) { free_async_command(cmd); return NULL; } - crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id)); - crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options)); - crm_element_value_int(msg, F_STONITH_DELAY, &(cmd->start_delay)); - crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout)); + crm_element_value_int(msg, PCMK__XA_ST_CALLID, &(cmd->id)); + crm_element_value_int(msg, PCMK__XA_ST_CALLOPT, &(cmd->options)); + crm_element_value_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay)); + crm_element_value_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; - cmd->origin = crm_element_value_copy(msg, F_ORIG); - cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID); - cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME); - cmd->target = crm_element_value_copy(op, F_STONITH_TARGET); - cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE); + cmd->origin = crm_element_value_copy(msg, PCMK__XA_SRC); + cmd->remote_op_id = crm_element_value_copy(msg, PCMK__XA_ST_REMOTE_OP); + cmd->client_name = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTNAME); + cmd->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET); + cmd->device = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ID); cmd->done_cb = st_child_done; @@ -645,12 +644,13 @@ schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) } if (device->include_nodeid && (cmd->target != NULL)) { - crm_node_t *node = crm_get_peer(0, cmd->target); + crm_node_t *node = pcmk__get_node(0, cmd->target, NULL, + pcmk__node_search_cluster_member); cmd->target_nodeid = node->id; } - cmd->device = strdup(device->id); + cmd->device = pcmk__str_copy(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { @@ -785,7 +785,7 @@ build_port_aliases(const char *hostmap, GList ** targets) case ':': if (lpc > last) { free(name); - name = calloc(1, 1 + lpc - last); + name = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; @@ -801,7 +801,7 @@ build_port_aliases(const char *hostmap, GList ** targets) char *value = NULL; int k = 0; - value = calloc(1, 1 + lpc - last); + value = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - 
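/*
 * Two small hardening patterns from the hunks above: get_action_delay_base()
 * parses pcmk_delay_base values such as "node1:5;node2:10" and picks the
 * entry for the target, and get_action_timeout() clamps a parsed millisecond
 * count before narrowing it to int. Simplified versions of both; the real
 * code accepts full interval specs via pcmk_parse_interval_spec() and
 * crm_get_msec(), and clamps with libqb's QB_MIN():
 */
#define _POSIX_C_SOURCE 200809L /* for strtok_r() */
#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Return the delay mapped to 'target' in "host:delay[;host:delay...]"
 * form, or 0 if absent; plain integer delays only in this sketch
 */
static int
delay_for_target(const char *spec, const char *target)
{
    char *copy = strdup(spec);
    char *save = NULL;
    int delay = 0;

    if (copy == NULL) {
        return 0;
    }
    for (char *tok = strtok_r(copy, "; \t", &save); tok != NULL;
         tok = strtok_r(NULL, "; \t", &save)) {
        char *sep = strchr(tok, ':');

        if ((sep != NULL) && (strncmp(tok, target, sep - tok) == 0)
            && (target[sep - tok] == '\0')) {
            delay = atoi(sep + 1);
            break;
        }
    }
    free(copy);
    return delay;
}

/* Narrow milliseconds to whole seconds without overflowing int */
static int
ms_to_capped_seconds(long long ms)
{
    if (ms <= 0) {
        return 0;
    }
    return (ms / 1000 > INT_MAX)? INT_MAX : (int) (ms / 1000);
}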
last); for (int i = 0; value[i] != '\0'; i++) { @@ -814,7 +814,7 @@ build_port_aliases(const char *hostmap, GList ** targets) crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { - *targets = g_list_append(*targets, strdup(value)); + *targets = g_list_append(*targets, pcmk__str_copy(value)); } value = NULL; name = NULL; @@ -888,10 +888,10 @@ get_agent_metadata(const char *agent, xmlNode ** metadata) crm_err("Could not retrieve metadata for fencing agent %s", agent); return EAGAIN; } - g_hash_table_replace(metadata_cache, strdup(agent), buffer); + g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer); } - *metadata = string2xml(buffer); + *metadata = pcmk__xml_parse(buffer); return pcmk_rc_ok; } @@ -908,7 +908,8 @@ is_nodeid_required(xmlNode * xml) return FALSE; } - xpath = xpath_search(xml, "//parameter[@name='nodeid']"); + xpath = xpath_search(xml, + "//" PCMK_XE_PARAMETER "[@" PCMK_XA_NAME "='nodeid']"); if (numXpathResults(xpath) <= 0) { freeXpathObject(xpath); return FALSE; @@ -944,7 +945,7 @@ read_action_metadata(stonith_device_t *device) CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; - action = crm_element_value(match, "name"); + action = crm_element_value(match, PCMK_XA_NAME); if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, @@ -956,16 +957,23 @@ read_action_metadata(stonith_device_t *device) stonith__set_device_flags(device->flags, device->id, st_device_supports_reboot); } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { - /* "automatic" means the cluster will unfence node when it joins */ - /* "required" is a deprecated synonym for "automatic" */ - if (pcmk__xe_attr_is_true(match, "automatic") || pcmk__xe_attr_is_true(match, "required")) { + /* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it + * joins. + * + * @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for + * PCMK_XA_AUTOMATIC. 
+ */ + if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC) + || pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) { device->automatic_unfencing = TRUE; } stonith__set_device_flags(device->flags, device->id, st_device_supports_on); } - if ((action != NULL) && pcmk__xe_attr_is_true(match, "on_target")) { + if ((action != NULL) + && pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) { + pcmk__add_word(&(device->on_target_actions), 64, action); } } @@ -993,7 +1001,7 @@ map_action(GHashTable *params, const char *action, const char *value) } else { crm_warn("Mapping %s='%s' to %s='%s'", STONITH_ATTR_ACTION_OP, value, key, value); - g_hash_table_insert(params, key, strdup(value)); + g_hash_table_insert(params, key, pcmk__str_copy(value)); } } @@ -1023,7 +1031,8 @@ xml2device_params(const char *name, const xmlNode *dev) crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP); } else if (strcmp(value, PCMK_ACTION_REBOOT) == 0) { - crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)", + crm_warn("Ignoring %s='reboot' (see " PCMK_OPT_STONITH_ACTION + " cluster property instead)", STONITH_ATTR_ACTION_OP); } else if (strcmp(value, PCMK_ACTION_OFF) == 0) { @@ -1050,15 +1059,15 @@ target_list_type(stonith_device_t * dev) if (check_type == NULL) { if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { - check_type = "static-list"; + check_type = PCMK_VALUE_STATIC_LIST; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { - check_type = "static-list"; + check_type = PCMK_VALUE_STATIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { - check_type = "dynamic-list"; + check_type = PCMK_VALUE_DYNAMIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_status)) { - check_type = "status"; + check_type = PCMK_VALUE_STATUS; } else { - check_type = PCMK__VALUE_NONE; + check_type = PCMK_VALUE_NONE; } } @@ -1070,17 +1079,15 @@ build_device_from_xml(xmlNode *dev) { const char *value; stonith_device_t *device = NULL; - char *agent = crm_element_value_copy(dev, "agent"); + char *agent = crm_element_value_copy(dev, PCMK_XA_AGENT); CRM_CHECK(agent != NULL, return device); - device = calloc(1, sizeof(stonith_device_t)); - - CRM_CHECK(device != NULL, {free(agent); return device;}); + device = pcmk__assert_alloc(1, sizeof(stonith_device_t)); - device->id = crm_element_value_copy(dev, XML_ATTR_ID); + device->id = crm_element_value_copy(dev, PCMK_XA_ID); device->agent = agent; - device->namespace = crm_element_value_copy(dev, "namespace"); + device->namespace = crm_element_value_copy(dev, PCMK__XA_NAMESPACE); device->params = xml2device_params(device->id, dev); value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST); @@ -1092,8 +1099,10 @@ build_device_from_xml(xmlNode *dev) device->aliases = build_port_aliases(value, &(device->targets)); value = target_list_type(device); - if (!pcmk__str_eq(value, "static-list", pcmk__str_casei) && device->targets) { - /* Other than "static-list", dev-> targets is unnecessary. 
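/*
 * target_list_type() above resolves an unset pcmk_host_check by precedence:
 * a configured host list or map implies "static-list", a list-capable agent
 * implies "dynamic-list", a status-capable agent implies "status", and
 * otherwise "none". The same decision table in isolation, with plain flags
 * standing in for the device parameter table and flag set:
 */
#include <stdbool.h>

static const char *
default_host_check(bool have_host_list, bool have_host_map,
                   bool supports_list, bool supports_status)
{
    if (have_host_list || have_host_map) {
        return "static-list";
    }
    if (supports_list) {
        return "dynamic-list";
    }
    if (supports_status) {
        return "status";
    }
    return "none";
}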
*/ + if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei) + && (device->targets != NULL)) { + + // device->targets is necessary only with PCMK_VALUE_STATIC_LIST g_list_free_full(device->targets, free); device->targets = NULL; } @@ -1125,8 +1134,8 @@ build_device_from_xml(xmlNode *dev) device->include_nodeid = is_nodeid_required(device->agent_metadata); } - value = crm_element_value(dev, "rsc_provides"); - if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) { + value = crm_element_value(dev, PCMK__XA_RSC_PROVIDES); + if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { device->automatic_unfencing = TRUE; } @@ -1159,17 +1168,17 @@ schedule_internal_command(const char *origin, { async_command_t *cmd = NULL; - cmd = calloc(1, sizeof(async_command_t)); + cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; - cmd->action = strdup(action); - pcmk__str_update(&cmd->target, target); - cmd->device = strdup(device->id); - cmd->origin = strdup(origin); - cmd->client = strdup(crm_system_name); - cmd->client_name = strdup(crm_system_name); + cmd->action = pcmk__str_copy(action); + cmd->target = pcmk__str_copy(target); + cmd->device = pcmk__str_copy(device->id); + cmd->origin = pcmk__str_copy(origin); + cmd->client = pcmk__str_copy(crm_system_name); + cmd->client_name = pcmk__str_copy(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ @@ -1292,13 +1301,13 @@ dynamic_list_search_cb(int pid, const pcmk__action_result_t *result, ((result->exit_reason == NULL)? "" : ")")); } - /* Fall back to pcmk_host_check="status" if the user didn't explicitly - * specify "dynamic-list". 
+ /* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't + * explicitly specify PCMK_VALUE_DYNAMIC_LIST */ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) { crm_notice("Switching to pcmk_host_check='status' for %s", dev->id); - g_hash_table_replace(dev->params, strdup(PCMK_STONITH_HOST_CHECK), - strdup("status")); + pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK, + PCMK_VALUE_STATUS); } } @@ -1330,7 +1339,7 @@ device_params_diff(GHashTable *first, GHashTable *second) { if(strstr(key, "CRM_meta") == key) { continue; - } else if(strcmp(key, "crm_feature_set") == 0) { + } else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) { continue; } else { char *other_value = g_hash_table_lookup(second, key); @@ -1389,7 +1398,7 @@ stonith_device_register(xmlNode *dev, gboolean from_cib) STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do { if (stonith_watchdog_timeout_ms <= 0) { crm_err("Ignoring watchdog fence device without " - "stonith-watchdog-timeout set."); + PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set."); rv = -ENODEV; /* fall through to cleanup & return */ } else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, @@ -1419,9 +1428,8 @@ stonith_device_register(xmlNode *dev, gboolean from_cib) if (node_does_watchdog_fencing(stonith_our_uname)) { g_list_free_full(device->targets, free); device->targets = stonith__parse_targets(stonith_our_uname); - g_hash_table_replace(device->params, - strdup(PCMK_STONITH_HOST_LIST), - strdup(stonith_our_uname)); + pcmk__insert_dup(device->params, + PCMK_STONITH_HOST_LIST, stonith_our_uname); /* proceed as with any other stonith-device */ break; } @@ -1578,18 +1586,18 @@ stonith_level_key(const xmlNode *level, enum fenced_target_by mode) } switch (mode) { case fenced_target_by_name: - return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET); + return crm_element_value_copy(level, PCMK_XA_TARGET); case fenced_target_by_pattern: - return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN); + return crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN); case fenced_target_by_attribute: return crm_strdup_printf("%s=%s", - crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE), - crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE)); + crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE), + crm_element_value(level, PCMK_XA_TARGET_VALUE)); default: - return crm_strdup_printf("unknown-%s", ID(level)); + return crm_strdup_printf("unknown-%s", pcmk__xe_id(level)); } } @@ -1604,15 +1612,15 @@ stonith_level_key(const xmlNode *level, enum fenced_target_by mode) static enum fenced_target_by unpack_level_kind(const xmlNode *level) { - if (crm_element_value(level, XML_ATTR_STONITH_TARGET) != NULL) { + if (crm_element_value(level, PCMK_XA_TARGET) != NULL) { return fenced_target_by_name; } - if (crm_element_value(level, XML_ATTR_STONITH_TARGET_PATTERN) != NULL) { + if (crm_element_value(level, PCMK_XA_TARGET_PATTERN) != NULL) { return fenced_target_by_pattern; } if (!stand_alone /* if standalone, there's no attribute manager */ - && (crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE) != NULL) - && (crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE) != NULL)) { + && (crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL) + && (crm_element_value(level, PCMK_XA_TARGET_VALUE) != NULL)) { return fenced_target_by_attribute; } return fenced_target_by_unknown; @@ -1670,8 +1678,8 @@ unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, * search by xpath, because it might give multiple 
hits if the XML is the * CIB. */ - if ((xml != NULL) && !pcmk__xe_is(xml, XML_TAG_FENCING_LEVEL)) { - xml = get_xpath_object("//" XML_TAG_FENCING_LEVEL, xml, LOG_WARNING); + if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) { + xml = get_xpath_object("//" PCMK_XE_FENCING_LEVEL, xml, LOG_WARNING); } if (xml == NULL) { @@ -1681,7 +1689,7 @@ unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, } else { local_mode = unpack_level_kind(xml); local_target = stonith_level_key(xml, local_mode); - crm_element_value_int(xml, XML_ATTR_STONITH_INDEX, &local_id); + crm_element_value_int(xml, PCMK_XA_INDEX, &local_id); if (desc != NULL) { *desc = crm_strdup_printf("%s[%d]", local_target, local_id); } @@ -1737,7 +1745,7 @@ fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) } // Ensure an ID was given (even the client API adds an ID) - if (pcmk__str_empty(ID(level))) { + if (pcmk__str_empty(pcmk__xe_id(level))) { crm_warn("Ignoring registration for topology level without ID"); free(target); crm_log_xml_trace(level, "Bad level"); @@ -1749,12 +1757,12 @@ fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) // Ensure a valid target was specified if (mode == fenced_target_by_unknown) { crm_warn("Ignoring registration for topology level '%s' " - "without valid target", ID(level)); + "without valid target", pcmk__xe_id(level)); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid target for topology level '%s'", - ID(level)); + pcmk__xe_id(level)); return; } @@ -1766,28 +1774,24 @@ fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level '%s'", - pcmk__s(crm_element_value(level, - XML_ATTR_STONITH_INDEX), + pcmk__s(crm_element_value(level, PCMK_XA_INDEX), ""), - ID(level)); + pcmk__xe_id(level)); return; } /* Find or create topology table entry */ tp = g_hash_table_lookup(topology, target); if (tp == NULL) { - tp = calloc(1, sizeof(stonith_topology_t)); - if (tp == NULL) { - pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_ERROR, - strerror(ENOMEM)); - free(target); - return; - } + tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t)); + tp->kind = mode; tp->target = target; - tp->target_value = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_VALUE); - tp->target_pattern = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN); - tp->target_attribute = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE); + tp->target_value = crm_element_value_copy(level, PCMK_XA_TARGET_VALUE); + tp->target_pattern = crm_element_value_copy(level, + PCMK_XA_TARGET_PATTERN); + tp->target_attribute = crm_element_value_copy(level, + PCMK_XA_TARGET_ATTRIBUTE); g_hash_table_replace(topology, tp->target, tp); crm_trace("Added %s (%d) to the topology (%d active entries)", @@ -1801,12 +1805,12 @@ fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) tp->target, id); } - devices = parse_device_list(crm_element_value(level, XML_ATTR_STONITH_DEVICES)); + devices = parse_device_list(crm_element_value(level, PCMK_XA_DEVICES)); for (dIter = devices; dIter; dIter = dIter->next) { const char *device = dIter->value; crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id); - tp->levels[id] = g_list_append(tp->levels[id], strdup(device)); + 
tp->levels[id] = g_list_append(tp->levels[id], pcmk__str_copy(device)); } stonith_key_value_freeall(devices, 1, 1); @@ -1857,12 +1861,11 @@ fenced_unregister_level(xmlNode *msg, char **desc, crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level %s", - pcmk__s(crm_element_value(level, - XML_ATTR_STONITH_INDEX), + pcmk__s(crm_element_value(level, PCMK_XA_INDEX), ""), // Client API doesn't add ID to unregistration XML - pcmk__s(ID(level), "")); + pcmk__s(pcmk__xe_id(level), "")); return; } @@ -1906,26 +1909,29 @@ list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) char *rv; GList *gIter; + char *pos = NULL; + const char *lead_delim = ""; + for (gIter = list; gIter != NULL; gIter = gIter->next) { const char *value = (const char *) gIter->data; alloc_size += strlen(value); } - rv = calloc(alloc_size, sizeof(char)); - if (rv) { - char *pos = rv; - const char *lead_delim = ""; - for (gIter = list; gIter != NULL; gIter = gIter->next) { - const char *value = (const char *) gIter->data; + rv = pcmk__assert_alloc(alloc_size, sizeof(char)); + pos = rv; - pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; - lead_delim = delim; - } - if (max && terminate_with_delim) { - sprintf(pos, "%s", delim); - } + for (gIter = list; gIter != NULL; gIter = gIter->next) { + const char *value = (const char *) gIter->data; + + pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; + lead_delim = delim; + } + + if (max && terminate_with_delim) { + sprintf(pos, "%s", delim); } + return rv; } @@ -1947,10 +1953,11 @@ list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) static void execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) { - xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR); - xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR); - const char *id = crm_element_value(dev, F_STONITH_DEVICE); - const char *action = crm_element_value(op, F_STONITH_ACTION); + xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, msg, LOG_ERR); + xmlNode *op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg, + LOG_ERR); + const char *id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); + const char *action = crm_element_value(op, PCMK__XA_ST_DEVICE_ACTION); async_command_t *cmd = NULL; stonith_device_t *device = NULL; @@ -2023,7 +2030,8 @@ search_devices_record_result(struct device_search_s *search, const char *device, return; } } - search->capable = g_list_append(search->capable, strdup(device)); + search->capable = g_list_append(search->capable, + pcmk__str_copy(device)); } if (search->replies_needed == search->replies_received) { @@ -2158,10 +2166,12 @@ can_fence_host_with_device(stonith_device_t *dev, // Check eligibility as specified by pcmk_host_check check_type = target_list_type(dev); alias = g_hash_table_lookup(dev->aliases, target); - if (pcmk__str_eq(check_type, PCMK__VALUE_NONE, pcmk__str_casei)) { + if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) { can = TRUE; - } else if (pcmk__str_eq(check_type, "static-list", pcmk__str_casei)) { + } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST, + pcmk__str_casei)) { + if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) @@ -2169,7 +2179,8 @@ can_fence_host_with_device(stonith_device_t *dev, can = TRUE; } - } else if (pcmk__str_eq(check_type, "dynamic-list", 
+    } else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST,
+                            pcmk__str_casei)) {
         time_t now = time(NULL);
 
         if (dev->targets == NULL || dev->targets_age + 60 < now) {
@@ -2177,8 +2188,10 @@ can_fence_host_with_device(stonith_device_t *dev,
                                                   search->per_device_timeout);
 
             if (device_timeout > search->per_device_timeout) {
-                crm_notice("Since the pcmk_list_timeout(%ds) parameter of %s is larger than stonith-timeout(%ds), timeout may occur",
-                           device_timeout, dev_id, search->per_device_timeout);
+                crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s "
+                           "is larger than " PCMK_OPT_STONITH_TIMEOUT
+                           " (%ds), timeout may occur",
+                           device_timeout, dev_id, search->per_device_timeout);
             }
 
             crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
@@ -2196,12 +2209,14 @@ can_fence_host_with_device(stonith_device_t *dev,
             can = TRUE;
         }
 
-    } else if (pcmk__str_eq(check_type, "status", pcmk__str_casei)) {
+    } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) {
         int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout);
 
         if (device_timeout > search->per_device_timeout) {
-            crm_notice("Since the pcmk_status_timeout(%ds) parameter of %s is larger than stonith-timeout(%ds), timeout may occur",
-                       device_timeout, dev_id, search->per_device_timeout);
+            crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is "
+                       "larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), "
+                       "timeout may occur",
+                       device_timeout, dev_id, search->per_device_timeout);
         }
 
         crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
@@ -2246,16 +2261,10 @@ get_capable_devices(const char *host, const char *action, int timeout, bool suic
         return;
     }
 
-    search = calloc(1, sizeof(struct device_search_s));
-    if (!search) {
-        crm_crit("Cannot search for capable fence devices: %s",
-                 strerror(ENOMEM));
-        callback(NULL, user_data);
-        return;
-    }
+    search = pcmk__assert_alloc(1, sizeof(struct device_search_s));
 
-    pcmk__str_update(&search->host, host);
-    pcmk__str_update(&search->action, action);
+    search->host = pcmk__str_copy(host);
+    search->action = pcmk__str_copy(action);
     search->per_device_timeout = timeout;
     search->allow_suicide = suicide;
     search->callback = callback;
@@ -2303,28 +2312,31 @@ add_action_specific_attributes(xmlNode *xml, const char *action,
 
     CRM_CHECK(xml && action && device, return);
 
+    // PCMK__XA_ST_REQUIRED is currently used only for unfencing
     if (is_action_required(action, device)) {
         crm_trace("Action '%s' is required using %s", action, device->id);
-        crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1);
+        crm_xml_add_int(xml, PCMK__XA_ST_REQUIRED, 1);
     }
 
+    // pcmk__timeout if configured
     action_specific_timeout = get_action_timeout(device, action, 0);
     if (action_specific_timeout) {
-        crm_trace("Action '%s' has timeout %dms using %s",
+        crm_trace("Action '%s' has timeout %ds using %s",
                   action, action_specific_timeout, device->id);
-        crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
+        crm_xml_add_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
+                        action_specific_timeout);
     }
 
     delay_max = get_action_delay_max(device, action);
     if (delay_max > 0) {
         crm_trace("Action '%s' has maximum random delay %ds using %s",
                   action, delay_max, device->id);
-        crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max);
+        crm_xml_add_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max);
     }
 
     delay_base = get_action_delay_base(device, action, target);
     if (delay_base > 0) {
-        crm_xml_add_int(xml, F_STONITH_DELAY_BASE, delay_base);
+        crm_xml_add_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base);
     }
 
     if ((delay_max > 0) && (delay_base == 0)) {
@@ -2357,7 +2369,7 @@ add_disallowed(xmlNode *xml, const char *action, const stonith_device_t *device,
     if (!localhost_is_eligible(device, action, target, allow_suicide)) {
         crm_trace("Action '%s' using %s is disallowed for local host",
                   action, device->id);
-        pcmk__xe_set_bool_attr(xml, F_STONITH_ACTION_DISALLOWED, true);
+        pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true);
     }
 }
@@ -2376,9 +2388,9 @@ add_action_reply(xmlNode *xml, const char *action,
                  const stonith_device_t *device, const char *target,
                  gboolean allow_suicide)
 {
-    xmlNode *child = create_xml_node(xml, F_STONITH_ACTION);
+    xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION);
 
-    crm_xml_add(child, XML_ATTR_ID, action);
+    crm_xml_add(child, PCMK_XA_ID, action);
     add_action_specific_attributes(child, action, device, target);
     add_disallowed(child, action, device, target, allow_suicide);
 }
@@ -2402,8 +2414,11 @@ stonith_send_reply(const xmlNode *reply, int call_options,
     if (remote_peer == NULL) {
         do_local_reply(reply, client, call_options);
     } else {
-        send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng,
-                             reply, FALSE);
+        const crm_node_t *node =
+            pcmk__get_node(0, remote_peer, NULL,
+                           pcmk__node_search_cluster_member);
+
+        pcmk__cluster_send_message(node, crm_msg_stonith_ng, reply);
     }
 }
@@ -2412,7 +2427,7 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
 {
     struct st_query_data *query = user_data;
     int available_devices = 0;
-    xmlNode *dev = NULL;
+    xmlNode *wrapper = NULL;
     xmlNode *list = NULL;
     GList *lpc = NULL;
     pcmk__client_t *client = NULL;
@@ -2426,12 +2441,15 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
         }
     }
 
-    /* Pack the results into XML */
-    list = create_xml_node(NULL, __func__);
-    crm_xml_add(list, F_STONITH_TARGET, query->target);
+    // Pack the results into XML
+    wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA);
+    list = pcmk__xe_create(wrapper, __func__);
+    crm_xml_add(list, PCMK__XA_ST_TARGET, query->target);
+
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
         const char *action = query->action;
+        xmlNode *dev = NULL;
 
         if (!device) {
             /* It is possible the device got unregistered while
@@ -2441,12 +2459,15 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
 
         available_devices++;
 
-        dev = create_xml_node(list, F_STONITH_DEVICE);
-        crm_xml_add(dev, XML_ATTR_ID, device->id);
-        crm_xml_add(dev, "namespace", device->namespace);
-        crm_xml_add(dev, "agent", device->agent);
-        crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
-        crm_xml_add_int(dev, F_STONITH_DEVICE_SUPPORT_FLAGS, device->flags);
+        dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID);
+        crm_xml_add(dev, PCMK_XA_ID, device->id);
+        crm_xml_add(dev, PCMK__XA_NAMESPACE, device->namespace);
+        crm_xml_add(dev, PCMK_XA_AGENT, device->agent);
+
+        // Has had successful monitor, list, or status on this node
+        crm_xml_add_int(dev, PCMK__XA_ST_MONITOR_VERIFIED, device->verified);
+
+        crm_xml_add_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, device->flags);
 
        /* If the originating fencer wants to reboot the node, and we have a
         * capable device that doesn't support "reboot", remap to "off" instead.
@@ -2482,13 +2503,13 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
 
         /* A query without a target wants device parameters */
         if (query->target == NULL) {
-            xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);
+            xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES);
 
             g_hash_table_foreach(device->params, hash2field, attrs);
         }
     }
 
-    crm_xml_add_int(list, F_STONITH_AVAILABLE_DEVICES, available_devices);
+    crm_xml_add_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices);
+
     if (query->target) {
         crm_debug("Found %d matching device%s for target '%s'",
                   available_devices, pcmk__plural_s(available_devices),
@@ -2498,10 +2519,7 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
                   available_devices, pcmk__plural_s(available_devices));
     }
 
-    if (list != NULL) {
-        crm_log_xml_trace(list, "Add query results");
-        add_message_xml(query->reply, F_STONITH_CALLDATA, list);
-    }
+    crm_log_xml_trace(list, "query-result");
 
     stonith_send_reply(query->reply, query->call_options, query->remote_peer,
                        client);
@@ -2513,7 +2531,6 @@ done:
     free(query->target);
     free(query->action);
     free(query);
-    free_xml(list);
     g_list_free_full(devices, free);
 }
@@ -2542,14 +2559,16 @@ log_async_result(const async_command_t *cmd,
     if (pcmk__result_ok(result)) {
         log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE;
         if ((result->action_stdout != NULL)
-            && !pcmk__str_eq(cmd->action, "metadata", pcmk__str_none)) {
+            && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
+                             pcmk__str_none)) {
             output_log_level = LOG_DEBUG;
         }
         next = NULL;
 
     } else {
         log_level = (cmd->target == NULL)? LOG_NOTICE : LOG_ERR;
         if ((result->action_stdout != NULL)
-            && !pcmk__str_eq(cmd->action, "metadata", pcmk__str_none)) {
+            && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
+                             pcmk__str_none)) {
             output_log_level = LOG_WARNING;
         }
     }
@@ -2633,7 +2652,7 @@ send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result
 
     reply = construct_async_reply(cmd, result);
     if (merged) {
-        pcmk__xe_set_bool_attr(reply, F_STONITH_MERGED, true);
+        pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true);
     }
 
     if (!stand_alone && pcmk__is_fencing_action(cmd->action)
@@ -2643,9 +2662,9 @@ send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result
         */
         crm_trace("Broadcast '%s' result for %s (target was also originator)",
                   cmd->action, cmd->target);
-        crm_xml_add(reply, F_SUBTYPE, "broadcast");
-        crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
-        send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
+        crm_xml_add(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST);
+        crm_xml_add(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY);
+        pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, reply);
 
     } else {
         // Reply only to the originator
         stonith_send_reply(reply, cmd->options, cmd->origin, client);
     }
@@ -2656,18 +2675,19 @@ send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result
 
     if (stand_alone) {
         /* Do notification with a clean data object */
-        xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
+        xmlNode *notify_data = pcmk__xe_create(NULL, PCMK__XE_ST_NOTIFY_FENCE);
 
         stonith__xe_set_result(notify_data, result);
-        crm_xml_add(notify_data, F_STONITH_TARGET, cmd->target);
-        crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op);
-        crm_xml_add(notify_data, F_STONITH_DELEGATE, "localhost");
-        crm_xml_add(notify_data, F_STONITH_DEVICE, cmd->device);
-        crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
-        crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client);
+        crm_xml_add(notify_data, PCMK__XA_ST_TARGET, cmd->target);
+        crm_xml_add(notify_data, PCMK__XA_ST_OP, cmd->op);
+        crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, "localhost");
+        crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ID, cmd->device);
+        crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id);
+        crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, cmd->client);
 
-        fenced_send_notification(T_STONITH_NOTIFY_FENCE, result, notify_data);
-        fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
+        fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, result,
+                                 notify_data);
+        fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
     }
 }
@@ -2890,7 +2910,7 @@ fence_locally(xmlNode *msg, pcmk__action_result_t *result)
 
     CRM_CHECK((msg != NULL) && (result != NULL), return);
 
-    dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
+    dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR);
 
     cmd = create_async_command(msg);
     if (cmd == NULL) {
@@ -2899,7 +2919,7 @@ fence_locally(xmlNode *msg, pcmk__action_result_t *result)
         return;
     }
 
-    device_id = crm_element_value(dev, F_STONITH_DEVICE);
+    device_id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
     if (device_id != NULL) {
         device = g_hash_table_lookup(device_list, device_id);
         if (device == NULL) {
@@ -2911,14 +2931,16 @@ fence_locally(xmlNode *msg, pcmk__action_result_t *result)
         schedule_stonith_command(cmd, device);
 
     } else {
-        const char *host = crm_element_value(dev, F_STONITH_TARGET);
+        const char *host = crm_element_value(dev, PCMK__XA_ST_TARGET);
 
         if (pcmk_is_set(cmd->options, st_opt_cs_nodeid)) {
             int nodeid = 0;
             crm_node_t *node = NULL;
 
             pcmk__scan_min_int(host, &nodeid, 0);
-            node = pcmk__search_known_node_cache(nodeid, NULL, CRM_GET_PEER_ANY);
+            node = pcmk__search_node_caches(nodeid, NULL,
+                                            pcmk__node_search_any
+                                            |pcmk__node_search_cluster_cib);
             if (node != NULL) {
                 host = node->uname;
             }
@@ -2953,10 +2975,10 @@ fenced_construct_reply(const xmlNode *request, xmlNode *data,
 {
     xmlNode *reply = NULL;
 
-    reply = create_xml_node(NULL, T_STONITH_REPLY);
+    reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
 
-    crm_xml_add(reply, "st_origin", __func__);
-    crm_xml_add(reply, F_TYPE, T_STONITH_NG);
+    crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
+    crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
     stonith__xe_set_result(reply, result);
 
     if (request == NULL) {
@@ -2976,12 +2998,12 @@ fenced_construct_reply(const xmlNode *request, xmlNode *data,
 
         // Attributes to copy from request to reply
         const char *names[] = {
-            F_STONITH_OPERATION,
-            F_STONITH_CALLID,
-            F_STONITH_CLIENTID,
-            F_STONITH_CLIENTNAME,
-            F_STONITH_REMOTE_OP_ID,
-            F_STONITH_CALLOPTS
+            PCMK__XA_ST_OP,
+            PCMK__XA_ST_CALLID,
+            PCMK__XA_ST_CLIENTID,
+            PCMK__XA_ST_CLIENTNAME,
+            PCMK__XA_ST_REMOTE_OP,
+            PCMK__XA_ST_CALLOPT,
         };
 
         for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) {
@@ -2990,7 +3012,9 @@ fenced_construct_reply(const xmlNode *request, xmlNode *data,
             crm_xml_add(reply, name, value);
         }
         if (data != NULL) {
-            add_message_xml(reply, F_STONITH_CALLDATA, data);
+            xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA);
+
+            pcmk__xml_copy(wrapper, data);
         }
     }
     return reply;
@@ -3007,20 +3031,20 @@ static xmlNode *
 construct_async_reply(const async_command_t *cmd,
                       const pcmk__action_result_t *result)
 {
-    xmlNode *reply = create_xml_node(NULL, T_STONITH_REPLY);
-
-    crm_xml_add(reply, "st_origin", __func__);
-    crm_xml_add(reply, F_TYPE, T_STONITH_NG);
-    crm_xml_add(reply, F_STONITH_OPERATION, cmd->op);
-    crm_xml_add(reply, F_STONITH_DEVICE, cmd->device);
-    crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
-    crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client);
-    crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name);
-    crm_xml_add(reply, F_STONITH_TARGET, cmd->target);
-    crm_xml_add(reply, F_STONITH_ACTION, cmd->op);
-    crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin);
-    crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id);
-    crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options);
+    xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
+
+    crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
+    crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
+    crm_xml_add(reply, PCMK__XA_ST_OP, cmd->op);
+    crm_xml_add(reply, PCMK__XA_ST_DEVICE_ID, cmd->device);
+    crm_xml_add(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id);
+    crm_xml_add(reply, PCMK__XA_ST_CLIENTID, cmd->client);
+    crm_xml_add(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name);
+    crm_xml_add(reply, PCMK__XA_ST_TARGET, cmd->target);
+    crm_xml_add(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op);
+    crm_xml_add(reply, PCMK__XA_ST_ORIGIN, cmd->origin);
+    crm_xml_add_int(reply, PCMK__XA_ST_CALLID, cmd->id);
+    crm_xml_add_int(reply, PCMK__XA_ST_CALLOPT, cmd->options);
 
     stonith__xe_set_result(reply, result);
     return reply;
@@ -3081,7 +3105,8 @@ check_alternate_host(const char *target)
 static void
 remove_relay_op(xmlNode * request)
 {
-    xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, request, LOG_TRACE);
+    xmlNode *dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request,
+                                    LOG_TRACE);
     const char *relay_op_id = NULL;
     const char *op_id = NULL;
    const char *client_name = NULL;
    remote_fencing_op_t *relay_op = NULL;
 
     if (dev) {
-        target = crm_element_value(dev, F_STONITH_TARGET);
+        target = crm_element_value(dev, PCMK__XA_ST_TARGET);
     }
-    relay_op_id = crm_element_value(request, F_STONITH_REMOTE_OP_ID_RELAY);
-    op_id = crm_element_value(request, F_STONITH_REMOTE_OP_ID);
-    client_name = crm_element_value(request, F_STONITH_CLIENTNAME);
+    relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP_RELAY);
+    op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP);
+    client_name = crm_element_value(request, PCMK__XA_ST_CLIENTNAME);
 
     /* Delete RELAY operation.
      */
     if (relay_op_id && target
         && pcmk__str_eq(target, stonith_our_uname, pcmk__str_casei)) {
@@ -3162,11 +3187,11 @@ is_privileged(const pcmk__client_t *c, const char *op)
 static xmlNode *
 handle_register_request(pcmk__request_t *request)
 {
-    xmlNode *reply = create_xml_node(NULL, "reply");
+    xmlNode *reply = pcmk__xe_create(NULL, "reply");
 
     CRM_ASSERT(request->ipc_client != NULL);
-    crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER);
-    crm_xml_add(reply, F_STONITH_CLIENTID, request->ipc_client->id);
+    crm_xml_add(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER);
+    crm_xml_add(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id);
     pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     pcmk__set_request_flags(request, pcmk__request_reuse_options);
     return reply;
@@ -3187,11 +3212,12 @@ handle_agent_request(pcmk__request_t *request)
 static xmlNode *
 handle_update_timeout_request(pcmk__request_t *request)
 {
-    const char *call_id = crm_element_value(request->xml, F_STONITH_CALLID);
-    const char *client_id = crm_element_value(request->xml, F_STONITH_CLIENTID);
+    const char *call_id = crm_element_value(request->xml, PCMK__XA_ST_CALLID);
+    const char *client_id = crm_element_value(request->xml,
+                                              PCMK__XA_ST_CLIENTID);
     int op_timeout = 0;
 
-    crm_element_value_int(request->xml, F_STONITH_TIMEOUT, &op_timeout);
+    crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout);
     do_stonith_async_timeout_update(client_id, call_id, op_timeout);
     pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     return NULL;
@@ -3205,7 +3231,8 @@ handle_query_request(pcmk__request_t *request)
     xmlNode *dev = NULL;
     const char *action = NULL;
     const char *target = NULL;
-    const char *client_id = crm_element_value(request->xml, F_STONITH_CLIENTID);
+    const char *client_id = crm_element_value(request->xml,
+                                              PCMK__XA_ST_CLIENTID);
     struct st_query_data *query = NULL;
 
     if (request->peer != NULL) {
@@ -3218,51 +3245,51 @@ handle_query_request(pcmk__request_t *request)
 
     pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
 
-    dev = get_xpath_object("//@" F_STONITH_ACTION, request->xml, LOG_NEVER);
+    dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request->xml,
+                           LOG_NEVER);
     if (dev != NULL) {
-        const char *device = crm_element_value(dev, F_STONITH_DEVICE);
+        const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
 
         if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) {
             return NULL; // No query or reply necessary
         }
-        target = crm_element_value(dev, F_STONITH_TARGET);
-        action = crm_element_value(dev, F_STONITH_ACTION);
+        target = crm_element_value(dev, PCMK__XA_ST_TARGET);
+        action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
     }
 
     crm_log_xml_trace(request->xml, "Query");
 
-    query = calloc(1, sizeof(struct st_query_data));
-    CRM_ASSERT(query != NULL);
+    query = pcmk__assert_alloc(1, sizeof(struct st_query_data));
 
     query->reply = fenced_construct_reply(request->xml, NULL, &request->result);
-    pcmk__str_update(&query->remote_peer, request->peer);
-    pcmk__str_update(&query->client_id, client_id);
-    pcmk__str_update(&query->target, target);
-    pcmk__str_update(&query->action, action);
+    query->remote_peer = pcmk__str_copy(request->peer);
+    query->client_id = pcmk__str_copy(client_id);
+    query->target = pcmk__str_copy(target);
+    query->action = pcmk__str_copy(action);
     query->call_options = request->call_options;
 
-    crm_element_value_int(request->xml, F_STONITH_TIMEOUT, &timeout);
+    crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout);
     get_capable_devices(target, action, timeout,
                         pcmk_is_set(query->call_options, st_opt_allow_suicide),
                         query, stonith_query_capable_device_cb,
                         st_device_supports_none);
     return NULL;
 }
 
-// T_STONITH_NOTIFY
+// STONITH_OP_NOTIFY
 static xmlNode *
 handle_notify_request(pcmk__request_t *request)
 {
     const char *flag_name = NULL;
 
     CRM_ASSERT(request->ipc_client != NULL);
-    flag_name = crm_element_value(request->xml, F_STONITH_NOTIFY_ACTIVATE);
+    flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE);
     if (flag_name != NULL) {
         crm_debug("Enabling %s callbacks for client %s",
                   flag_name, pcmk__request_origin(request));
         pcmk__set_client_flags(request->ipc_client, get_stonith_flag(flag_name));
     }
 
-    flag_name = crm_element_value(request->xml, F_STONITH_NOTIFY_DEACTIVATE);
+    flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE);
     if (flag_name != NULL) {
         crm_debug("Disabling %s callbacks for client %s",
                   flag_name, pcmk__request_origin(request));
@@ -3273,22 +3300,23 @@ handle_notify_request(pcmk__request_t *request)
     pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     pcmk__set_request_flags(request, pcmk__request_reuse_options);
 
-    return pcmk__ipc_create_ack(request->ipc_flags, "ack", NULL, CRM_EX_OK);
+    return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL,
+                                CRM_EX_OK);
 }
 
 // STONITH_OP_RELAY
 static xmlNode *
 handle_relay_request(pcmk__request_t *request)
 {
-    xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request->xml,
+    xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml,
                                     LOG_TRACE);
 
     crm_notice("Received forwarded fencing request from "
                "%s %s to fence (%s) peer %s",
               pcmk__request_origin_type(request),
               pcmk__request_origin(request),
-               crm_element_value(dev, F_STONITH_ACTION),
-               crm_element_value(dev, F_STONITH_TARGET));
+               crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION),
+               crm_element_value(dev, PCMK__XA_ST_TARGET));
 
     if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) {
         fenced_set_protocol_error(&request->result);
@@ -3324,11 +3352,11 @@ handle_fence_request(pcmk__request_t *request)
     } else {
         const char *alternate_host = NULL;
 
-        xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request->xml,
+        xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml,
                                         LOG_TRACE);
-        const char *target = crm_element_value(dev, F_STONITH_TARGET);
-        const char *action = crm_element_value(dev, F_STONITH_ACTION);
-        const char *device = crm_element_value(dev, F_STONITH_DEVICE);
+        const char *target = crm_element_value(dev, PCMK__XA_ST_TARGET);
+        const char *action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
+        const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
 
         if (request->ipc_client != NULL) {
             int tolerance = 0;
 
             crm_notice("Client %s wants to fence (%s) %s using %s",
                        pcmk__request_origin(request), action,
                        target, (device? device : "any device"));
-            crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance);
+            crm_element_value_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance);
             if (stonith_check_fence_tolerance(tolerance, target, action)) {
                 pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE,
                                  NULL);
@@ -3354,9 +3382,12 @@ handle_fence_request(pcmk__request_t *request)
         if (alternate_host != NULL) {
             const char *client_id = NULL;
             remote_fencing_op_t *op = NULL;
+            crm_node_t *node = pcmk__get_node(0, alternate_host, NULL,
+                                              pcmk__node_search_cluster_member);
 
             if (request->ipc_client->id == 0) {
-                client_id = crm_element_value(request->xml, F_STONITH_CLIENTID);
+                client_id = crm_element_value(request->xml,
+                                              PCMK__XA_ST_CLIENTID);
             } else {
                 client_id = request->ipc_client->id;
             }
@@ -3367,12 +3398,11 @@ handle_fence_request(pcmk__request_t *request)
              */
             op = create_remote_stonith_op(client_id, request->xml, FALSE);
 
-            crm_xml_add(request->xml, F_STONITH_OPERATION, STONITH_OP_RELAY);
-            crm_xml_add(request->xml, F_STONITH_CLIENTID,
+            crm_xml_add(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY);
+            crm_xml_add(request->xml, PCMK__XA_ST_CLIENTID,
                         request->ipc_client->id);
-            crm_xml_add(request->xml, F_STONITH_REMOTE_OP_ID, op->id);
-            send_cluster_message(crm_get_peer(0, alternate_host),
-                                 crm_msg_stonith_ng, request->xml, FALSE);
+            crm_xml_add(request->xml, PCMK__XA_ST_REMOTE_OP, op->id);
+            pcmk__cluster_send_message(node, crm_msg_stonith_ng, request->xml);
             pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
                              NULL);
@@ -3416,8 +3446,8 @@ handle_history_request(pcmk__request_t *request)
 static xmlNode *
 handle_device_add_request(pcmk__request_t *request)
 {
-    const char *op = crm_element_value(request->xml, F_STONITH_OPERATION);
-    xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request->xml,
+    const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
+    xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml,
                                     LOG_ERR);
 
     if (is_privileged(request->ipc_client, op)) {
@@ -3432,8 +3462,8 @@ handle_device_add_request(pcmk__request_t *request)
                             PCMK_EXEC_INVALID,
                             "Unprivileged users must register device via CIB");
     }
-    fenced_send_device_notification(op, &request->result,
-                                    (dev == NULL)? NULL : ID(dev));
+    fenced_send_config_notification(op, &request->result,
+                                    (dev == NULL)? NULL : pcmk__xe_id(dev));
     return fenced_construct_reply(request->xml, NULL, &request->result);
 }
@@ -3441,10 +3471,10 @@ handle_device_add_request(pcmk__request_t *request)
 static xmlNode *
 handle_device_delete_request(pcmk__request_t *request)
 {
-    xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request->xml,
+    xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml,
                                     LOG_ERR);
-    const char *device_id = crm_element_value(dev, XML_ATTR_ID);
-    const char *op = crm_element_value(request->xml, F_STONITH_OPERATION);
+    const char *device_id = crm_element_value(dev, PCMK_XA_ID);
+    const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
 
     if (is_privileged(request->ipc_client, op)) {
         stonith_device_remove(device_id, false);
@@ -3454,7 +3484,7 @@ handle_device_delete_request(pcmk__request_t *request)
                             PCMK_EXEC_INVALID,
                             "Unprivileged users must delete device via CIB");
     }
-    fenced_send_device_notification(op, &request->result, device_id);
+    fenced_send_config_notification(op, &request->result, device_id);
     return fenced_construct_reply(request->xml, NULL, &request->result);
 }
@@ -3463,7 +3493,7 @@ static xmlNode *
 handle_level_add_request(pcmk__request_t *request)
 {
     char *desc = NULL;
-    const char *op = crm_element_value(request->xml, F_STONITH_OPERATION);
+    const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
 
     if (is_privileged(request->ipc_client, op)) {
         fenced_register_level(request->xml, &desc, &request->result);
@@ -3473,7 +3503,7 @@ handle_level_add_request(pcmk__request_t *request)
                             PCMK_EXEC_INVALID,
                             "Unprivileged users must add level via CIB");
     }
-    fenced_send_level_notification(op, &request->result, desc);
+    fenced_send_config_notification(op, &request->result, desc);
     free(desc);
     return fenced_construct_reply(request->xml, NULL, &request->result);
 }
@@ -3483,7 +3513,7 @@ static xmlNode *
 handle_level_delete_request(pcmk__request_t *request)
 {
     char *desc = NULL;
-    const char *op = crm_element_value(request->xml, F_STONITH_OPERATION);
+    const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
 
     if (is_privileged(request->ipc_client, op)) {
         fenced_unregister_level(request->xml, &desc, &request->result);
@@ -3493,7 +3523,7 @@ handle_level_delete_request(pcmk__request_t *request)
                             PCMK_EXEC_INVALID,
                             "Unprivileged users must delete level via CIB");
     }
-    fenced_send_level_notification(op, &request->result, desc);
+    fenced_send_config_notification(op, &request->result, desc);
     free(desc);
     return fenced_construct_reply(request->xml, NULL, &request->result);
 }
@@ -3505,9 +3535,9 @@ handle_cache_request(pcmk__request_t *request)
     int node_id = 0;
     const char *name = NULL;
 
-    crm_element_value_int(request->xml, XML_ATTR_ID, &node_id);
-    name = crm_element_value(request->xml, XML_ATTR_UNAME);
-    reap_crm_member(node_id, name);
+    crm_element_value_int(request->xml, PCMK_XA_ID, &node_id);
+    name = crm_element_value(request->xml, PCMK_XA_UNAME);
+    pcmk__cluster_forget_cluster_node(node_id, name);
     pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     return NULL;
 }
@@ -3531,7 +3561,7 @@ fenced_register_handlers(void)
         { STONITH_OP_EXEC, handle_agent_request },
         { STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request },
         { STONITH_OP_QUERY, handle_query_request },
-        { T_STONITH_NOTIFY, handle_notify_request },
+        { STONITH_OP_NOTIFY, handle_notify_request },
        { STONITH_OP_RELAY, handle_relay_request },
        { STONITH_OP_FENCE, handle_fence_request },
        { STONITH_OP_FENCE_HISTORY, handle_history_request },
@@ -3596,12 +3626,15 @@ static void
 handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer)
 {
     // Copy, because request might be freed before we want to log this
-    char *op = crm_element_value_copy(request, F_STONITH_OPERATION);
+    char *op = crm_element_value_copy(request, PCMK__XA_ST_OP);
 
     if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) {
         process_remote_stonith_query(request);
-    } else if (pcmk__str_any_of(op, T_STONITH_NOTIFY, STONITH_OP_FENCE, NULL)) {
+
+    } else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE,
+                                NULL)) {
         fenced_process_fencing_reply(request);
+
     } else {
         crm_err("Ignoring unknown %s reply from %s %s",
                 pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"),
@@ -3635,13 +3668,13 @@ stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags,
 
     CRM_CHECK(message != NULL, return);
 
-    if (get_xpath_object("//" T_STONITH_REPLY, message, LOG_NEVER) != NULL) {
+    if (get_xpath_object("//" PCMK__XE_ST_REPLY, message, LOG_NEVER) != NULL) {
         is_reply = true;
     }
-    crm_element_value_int(message, F_STONITH_CALLOPTS, &call_options);
+    crm_element_value_int(message, PCMK__XA_ST_CALLOPT, &call_options);
     crm_debug("Processing %ssynchronous %s %s %u from %s %s",
               pcmk_is_set(call_options, st_opt_sync_call)? "" : "a",
-              crm_element_value(message, F_STONITH_OPERATION),
+              crm_element_value(message, PCMK__XA_ST_OP),
               (is_reply? "reply" : "request"), id,
               ((client == NULL)? "peer" : "client"),
               ((client == NULL)? remote_peer : pcmk__client_name(client)));
@@ -3663,7 +3696,7 @@ stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags,
             .result = PCMK__UNKNOWN_RESULT,
         };
 
-        request.op = crm_element_value_copy(request.xml, F_STONITH_OPERATION);
+        request.op = crm_element_value_copy(request.xml, PCMK__XA_ST_OP);
         CRM_CHECK(request.op != NULL, return);
 
         if (pcmk_is_set(request.call_options, st_opt_sync_call)) {
diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c
index a766477..5fcdb1f 100644
--- a/daemons/fenced/fenced_history.c
+++ b/daemons/fenced/fenced_history.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009-2022 the Pacemaker project contributors
+ * Copyright 2009-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
@@ -14,7 +14,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -42,23 +41,22 @@ stonith_send_broadcast_history(xmlNode *history,
                                int callopts,
                                const char *target)
 {
-    xmlNode *bcast = create_xml_node(NULL, "stonith_command");
-    xmlNode *data = create_xml_node(NULL, __func__);
-
-    if (target) {
-        crm_xml_add(data, F_STONITH_TARGET, target);
-    }
-    crm_xml_add(bcast, F_TYPE, T_STONITH_NG);
-    crm_xml_add(bcast, F_SUBTYPE, "broadcast");
-    crm_xml_add(bcast, F_STONITH_OPERATION, STONITH_OP_FENCE_HISTORY);
-    crm_xml_add_int(bcast, F_STONITH_CALLOPTS, callopts);
-    if (history) {
-        add_node_copy(data, history);
+    xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND);
+    xmlNode *wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA);
+    xmlNode *call_data = pcmk__xe_create(wrapper, __func__);
+
+    crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
+    crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST);
+    crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_FENCE_HISTORY);
+    crm_xml_add_int(bcast, PCMK__XA_ST_CALLOPT, callopts);
+
+    pcmk__xml_copy(call_data, history);
+    if (target != NULL) {
+        crm_xml_add(call_data, PCMK__XA_ST_TARGET, target);
     }
-    add_message_xml(bcast, F_STONITH_CALLDATA, data);
-    send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
-    free_xml(data);
+
+    pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, bcast);
+
     free_xml(bcast);
 }
@@ -100,7 +98,7 @@ stonith_fence_history_cleanup(const char *target,
         g_hash_table_foreach_remove(stonith_remote_op_list,
                                     stonith_remove_history_entry,
                                     (gpointer) target);
-        fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
+        fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
     }
 }
@@ -133,34 +131,68 @@ stonith_fence_history_cleanup(const char *target,
  * situations where it would be handy to have it probably.
  */
 
-
-static int
-op_time_sort(const void *a_voidp, const void *b_voidp)
+/*!
+ * \internal
+ * \brief Compare two remote fencing operations by status and completion time
+ *
+ * A pending operation is ordered before a completed operation. If both
+ * operations have completed, then the more recently completed operation is
+ * ordered first. Two pending operations are considered equal.
+ *
+ * \param[in] a  First \c remote_fencing_op_t to compare
+ * \param[in] b  Second \c remote_fencing_op_t to compare
+ *
+ * \return Standard comparison result (a negative integer if \p a is lesser,
+ *         0 if the values are equal, and a positive integer if \p a is
+ *         greater)
+ */
+static gint
+cmp_op_by_completion(gconstpointer a, gconstpointer b)
 {
-    const remote_fencing_op_t **a = (const remote_fencing_op_t **) a_voidp;
-    const remote_fencing_op_t **b = (const remote_fencing_op_t **) b_voidp;
-    gboolean a_pending = ((*a)->state != st_failed) && ((*a)->state != st_done);
-    gboolean b_pending = ((*b)->state != st_failed) && ((*b)->state != st_done);
+    const remote_fencing_op_t *op1 = a;
+    const remote_fencing_op_t *op2 = b;
+    bool op1_pending = stonith__op_state_pending(op1->state);
+    bool op2_pending = stonith__op_state_pending(op2->state);
 
-    if (a_pending && b_pending) {
+    if (op1_pending && op2_pending) {
         return 0;
-    } else if (a_pending) {
+    }
+    if (op1_pending) {
         return -1;
-    } else if (b_pending) {
+    }
+    if (op2_pending) {
         return 1;
-    } else if ((*b)->completed == (*a)->completed) {
-        if ((*b)->completed_nsec > (*a)->completed_nsec) {
-            return 1;
-        } else if ((*b)->completed_nsec == (*a)->completed_nsec) {
-            return 0;
-        }
-    } else if ((*b)->completed > (*a)->completed) {
+    }
+    if (op1->completed > op2->completed) {
+        return -1;
+    }
+    if (op1->completed < op2->completed) {
         return 1;
     }
-
-    return -1;
+    if (op1->completed_nsec > op2->completed_nsec) {
+        return -1;
+    }
+    if (op1->completed_nsec < op2->completed_nsec) {
+        return 1;
+    }
+    return 0;
 }
 
+/*!
+ * \internal
+ * \brief Remove a completed operation from \c stonith_remote_op_list
+ *
+ * \param[in] data       \c remote_fencing_op_t to remove
+ * \param[in] user_data  Ignored
+ */
+static void
+remove_completed_remote_op(gpointer data, gpointer user_data)
+{
+    const remote_fencing_op_t *op = data;
+
+    if (!stonith__op_state_pending(op->state)) {
+        g_hash_table_remove(stonith_remote_op_list, op->id);
    }
+}
 
 /*!
  * \internal
@@ -170,43 +202,24 @@ op_time_sort(const void *a_voidp, const void *b_voidp)
 void
 stonith_fence_history_trim(void)
 {
-    guint num_ops;
-
-    if (!stonith_remote_op_list) {
+    if (stonith_remote_op_list == NULL) {
         return;
     }
-    num_ops = g_hash_table_size(stonith_remote_op_list);
-    if (num_ops > MAX_STONITH_HISTORY) {
-        remote_fencing_op_t *ops[num_ops];
-        remote_fencing_op_t *op = NULL;
-        GHashTableIter iter;
-        int i;
 
-        crm_trace("Fencing History growing beyond limit of %d so purge "
-                  "half of failed/successful attempts", MAX_STONITH_HISTORY);
+    if (g_hash_table_size(stonith_remote_op_list) > MAX_STONITH_HISTORY) {
+        GList *ops = g_hash_table_get_values(stonith_remote_op_list);
 
-        /* write all ops into an array */
-        i = 0;
-        g_hash_table_iter_init(&iter, stonith_remote_op_list);
-        while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
-            ops[i++] = op;
-        }
-        /* run quicksort over the array so that we get pending ops
-         * first and then sorted most recent to oldest
-         */
-        qsort(ops, num_ops, sizeof(remote_fencing_op_t *), op_time_sort);
-        /* purgest oldest half of the history entries */
-        for (i = MAX_STONITH_HISTORY / 2; i < num_ops; i++) {
-            /* keep pending ops even if they shouldn't fill more than
-             * half of our buffer
-             */
-            if ((ops[i]->state == st_failed) || (ops[i]->state == st_done)) {
-                g_hash_table_remove(stonith_remote_op_list, ops[i]->id);
-            }
-        }
-        /* we've just purged valid data from the list so there is no need
-         * to create a notification - if displayed it can stay
-         */
+        crm_trace("More than %d entries in fencing history, purging oldest "
+                  "completed operations", MAX_STONITH_HISTORY);
+
+        ops = g_list_sort(ops, cmp_op_by_completion);
+
+        // Always keep pending ops regardless of number of entries
+        g_list_foreach(g_list_nth(ops, MAX_STONITH_HISTORY / 2),
+                       remove_completed_remote_op, NULL);
+
+        // No need for a notification after purging old data
+        g_list_free(ops);
     }
 }
@@ -228,10 +241,11 @@ stonith_xml_history_to_list(const xmlNode *history)
 
     CRM_LOG_ASSERT(rv != NULL);
 
-    for (xml_op = pcmk__xml_first_child(history); xml_op != NULL;
-         xml_op = pcmk__xml_next(xml_op)) {
+    for (xml_op = pcmk__xe_first_child(history, NULL, NULL, NULL);
+         xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {
+
         remote_fencing_op_t *op = NULL;
-        char *id = crm_element_value_copy(xml_op, F_STONITH_REMOTE_OP_ID);
+        char *id = crm_element_value_copy(xml_op, PCMK__XA_ST_REMOTE_OP);
         int state;
         int exit_status = CRM_EX_OK;
         int execution_status = PCMK_EXEC_DONE;
@@ -245,35 +259,37 @@ stonith_xml_history_to_list(const xmlNode *history)
 
         crm_trace("Attaching op %s to hashtable", id);
 
-        op = calloc(1, sizeof(remote_fencing_op_t));
+        op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t));
 
         op->id = id;
-        op->target = crm_element_value_copy(xml_op, F_STONITH_TARGET);
-        op->action = crm_element_value_copy(xml_op, F_STONITH_ACTION);
-        op->originator = crm_element_value_copy(xml_op, F_STONITH_ORIGIN);
-        op->delegate = crm_element_value_copy(xml_op, F_STONITH_DELEGATE);
-        op->client_name = crm_element_value_copy(xml_op, F_STONITH_CLIENTNAME);
-        crm_element_value_ll(xml_op, F_STONITH_DATE, &completed);
+        op->target = crm_element_value_copy(xml_op, PCMK__XA_ST_TARGET);
+        op->action = crm_element_value_copy(xml_op, PCMK__XA_ST_DEVICE_ACTION);
+        op->originator = crm_element_value_copy(xml_op, PCMK__XA_ST_ORIGIN);
+        op->delegate = crm_element_value_copy(xml_op, PCMK__XA_ST_DELEGATE);
+        op->client_name = crm_element_value_copy(xml_op,
+                                                 PCMK__XA_ST_CLIENTNAME);
+        crm_element_value_ll(xml_op, PCMK__XA_ST_DATE, &completed);
        op->completed = (time_t) completed;
-        crm_element_value_ll(xml_op, F_STONITH_DATE_NSEC, &completed_nsec);
+        crm_element_value_ll(xml_op, PCMK__XA_ST_DATE_NSEC, &completed_nsec);
        op->completed_nsec = completed_nsec;
-        crm_element_value_int(xml_op, F_STONITH_STATE, &state);
+        crm_element_value_int(xml_op, PCMK__XA_ST_STATE, &state);
        op->state = (enum op_state) state;
 
        /* @COMPAT We can't use stonith__xe_get_result() here because
         * fencers <2.1.3 didn't include results, leading it to assume an error
         * status. Instead, set an unknown status in that case.
         */
-        if ((crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &exit_status) < 0)
-            || (crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS,
+        if ((crm_element_value_int(xml_op, PCMK__XA_RC_CODE, &exit_status) < 0)
+            || (crm_element_value_int(xml_op, PCMK__XA_OP_STATUS,
                                       &execution_status) < 0)) {
            exit_status = CRM_EX_INDETERMINATE;
            execution_status = PCMK_EXEC_UNKNOWN;
        }
        pcmk__set_result(&op->result, exit_status, execution_status,
-                         crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON));
+                         crm_element_value(xml_op, PCMK_XA_EXIT_REASON));
        pcmk__set_result_output(&op->result,
-                                crm_element_value_copy(xml_op, F_STONITH_OUTPUT),
+                                crm_element_value_copy(xml_op,
+                                                       PCMK__XA_ST_OUTPUT),
                                NULL);
@@ -309,7 +325,7 @@ stonith_local_history_diff_and_merge(GHashTable *remote_history,
     if (stonith_remote_op_list) {
         char *id = NULL;
 
-        history = create_xml_node(NULL, F_STONITH_HISTORY_LIST);
+        history = pcmk__xe_create(NULL, PCMK__XE_ST_HISTORY);
 
         g_hash_table_iter_init(&iter, stonith_remote_op_list);
         while (g_hash_table_iter_next(&iter, (void **)&id, (void **)&op)) {
@@ -361,18 +377,19 @@ stonith_local_history_diff_and_merge(GHashTable *remote_history,
             cnt++;
             crm_trace("Attaching op %s", op->id);
-            entry = create_xml_node(history, STONITH_OP_EXEC);
+            entry = pcmk__xe_create(history, STONITH_OP_EXEC);
             if (add_id) {
-                crm_xml_add(entry, F_STONITH_REMOTE_OP_ID, op->id);
+                crm_xml_add(entry, PCMK__XA_ST_REMOTE_OP, op->id);
             }
-            crm_xml_add(entry, F_STONITH_TARGET, op->target);
-            crm_xml_add(entry, F_STONITH_ACTION, op->action);
-            crm_xml_add(entry, F_STONITH_ORIGIN, op->originator);
-            crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate);
-            crm_xml_add(entry, F_STONITH_CLIENTNAME, op->client_name);
-            crm_xml_add_ll(entry, F_STONITH_DATE, op->completed);
-            crm_xml_add_ll(entry, F_STONITH_DATE_NSEC, op->completed_nsec);
-            crm_xml_add_int(entry, F_STONITH_STATE, op->state);
+            crm_xml_add(entry, PCMK__XA_ST_TARGET, op->target);
+            crm_xml_add(entry, PCMK__XA_ST_DEVICE_ACTION, op->action);
+            crm_xml_add(entry, PCMK__XA_ST_ORIGIN, op->originator);
+            crm_xml_add(entry, PCMK__XA_ST_DELEGATE, op->delegate);
+            crm_xml_add(entry, PCMK__XA_ST_CLIENTNAME, op->client_name);
+            crm_xml_add_ll(entry, PCMK__XA_ST_DATE, op->completed);
+            crm_xml_add_ll(entry, PCMK__XA_ST_DATE_NSEC,
+                           op->completed_nsec);
+            crm_xml_add_int(entry, PCMK__XA_ST_STATE, op->state);
             stonith__xe_set_result(entry, &op->result);
         }
     }
@@ -418,7 +435,7 @@ stonith_local_history_diff_and_merge(GHashTable *remote_history,
 
     if (updated) {
         stonith_fence_history_trim();
-        fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
+        fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
     }
 
     if (cnt == 0) {
@@ -459,17 +476,19 @@ stonith_fence_history(xmlNode *msg, xmlNode **output,
                       const char *remote_peer, int options)
 {
     const char *target = NULL;
-    xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_NEVER);
+    xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_NEVER);
*out_history = NULL; if (dev) { - target = crm_element_value(dev, F_STONITH_TARGET); + target = crm_element_value(dev, PCMK__XA_ST_TARGET); if (target && (options & st_opt_cs_nodeid)) { int nodeid; crm_node_t *node; pcmk__scan_min_int(target, &nodeid, 0); - node = pcmk__search_known_node_cache(nodeid, NULL, CRM_GET_PEER_ANY); + node = pcmk__search_node_caches(nodeid, NULL, + pcmk__node_search_any + |pcmk__node_search_cluster_cib); if (node) { target = node->uname; } @@ -477,18 +496,20 @@ stonith_fence_history(xmlNode *msg, xmlNode **output, } if (options & st_opt_cleanup) { + const char *call_id = crm_element_value(msg, PCMK__XA_ST_CALLID); + crm_trace("Cleaning up operations on %s in %p", target, stonith_remote_op_list); + stonith_fence_history_cleanup(target, (call_id != NULL)); - stonith_fence_history_cleanup(target, - crm_element_value(msg, F_STONITH_CALLID) != NULL); } else if (options & st_opt_broadcast) { /* there is no clear sign atm for when a history sync is done so send a notification for anything that smells like history-sync */ - fenced_send_notification(T_STONITH_NOTIFY_HISTORY_SYNCED, NULL, NULL); - if (crm_element_value(msg, F_STONITH_CALLID)) { + fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, NULL, + NULL); + if (crm_element_value(msg, PCMK__XA_ST_CALLID) != NULL) { /* this is coming from the stonith-API * * craft a broadcast with node's history @@ -502,8 +523,8 @@ stonith_fence_history(xmlNode *msg, xmlNode **output, NULL); } else if (remote_peer && !pcmk__str_eq(remote_peer, stonith_our_uname, pcmk__str_casei)) { - xmlNode *history = get_xpath_object("//" F_STONITH_HISTORY_LIST, - msg, LOG_NEVER); + xmlNode *history = get_xpath_object("//" PCMK__XE_ST_HISTORY, msg, + LOG_NEVER); /* either a broadcast created directly upon stonith-API request * or a diff as response to such a thing @@ -514,7 +535,9 @@ stonith_fence_history(xmlNode *msg, xmlNode **output, * otherwise broadcast what we have on top * marking as differential and merge in afterwards */ - if (!history || !pcmk__xe_attr_is_true(history, F_STONITH_DIFFERENTIAL)) { + if (!history + || !pcmk__xe_attr_is_true(history, PCMK__XA_ST_DIFFERENTIAL)) { + GHashTable *received_history = NULL; if (history != NULL) { @@ -524,7 +547,8 @@ stonith_fence_history(xmlNode *msg, xmlNode **output, stonith_local_history_diff_and_merge(received_history, TRUE, NULL); if (out_history) { crm_trace("Broadcasting history-diff to peers"); - pcmk__xe_set_bool_attr(out_history, F_STONITH_DIFFERENTIAL, true); + pcmk__xe_set_bool_attr(out_history, + PCMK__XA_ST_DIFFERENTIAL, true); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c index 843b3d4..f87eeb6 100644 --- a/daemons/fenced/fenced_remote.c +++ b/daemons/fenced/fenced_remote.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -24,7 +24,6 @@ #include #include -#include #include #include #include @@ -370,24 +369,25 @@ undo_op_remap(remote_fencing_op_t *op) * \internal * \brief Create notification data XML for a fencing operation result * - * \param[in] op Fencer operation that completed + * \param[in,out] parent Parent XML element for newly created element + * \param[in] op Fencer operation that completed * * \return Newly created XML to add as notification data * \note The caller is responsible for freeing the result. */ static xmlNode * -fencing_result2xml(const remote_fencing_op_t *op) +fencing_result2xml(xmlNode *parent, const remote_fencing_op_t *op) { - xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE); + xmlNode *notify_data = pcmk__xe_create(parent, PCMK__XE_ST_NOTIFY_FENCE); - crm_xml_add_int(notify_data, "state", op->state); - crm_xml_add(notify_data, F_STONITH_TARGET, op->target); - crm_xml_add(notify_data, F_STONITH_ACTION, op->action); - crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate); - crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id); - crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator); - crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id); - crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name); + crm_xml_add_int(notify_data, PCMK_XA_STATE, op->state); + crm_xml_add(notify_data, PCMK__XA_ST_TARGET, op->target); + crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ACTION, op->action); + crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, op->delegate); + crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, op->id); + crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, op->originator); + crm_xml_add(notify_data, PCMK__XA_ST_CLIENTID, op->client_id); + crm_xml_add(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name); return notify_data; } @@ -403,25 +403,26 @@ void fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged) { static int count = 0; - xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY); - xmlNode *notify_data = fencing_result2xml(op); + xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); + xmlNode *wrapper = NULL; + xmlNode *notify_data = NULL; count++; crm_trace("Broadcasting result to peers"); - crm_xml_add(bcast, F_TYPE, T_STONITH_NOTIFY); - crm_xml_add(bcast, F_SUBTYPE, "broadcast"); - crm_xml_add(bcast, F_STONITH_OPERATION, T_STONITH_NOTIFY); - crm_xml_add_int(bcast, "count", count); + crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); + crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); + crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); + crm_xml_add_int(bcast, PCMK_XA_COUNT, count); if (op_merged) { - pcmk__xe_set_bool_attr(bcast, F_STONITH_MERGED, true); + pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true); } + wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); + notify_data = fencing_result2xml(wrapper, op); stonith__xe_set_result(notify_data, &op->result); - add_message_xml(bcast, F_STONITH_CALLDATA, notify_data); - send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE); - free_xml(notify_data); + pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, bcast); free_xml(bcast); return; @@ -447,12 +448,12 @@ handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data) } /* Do notification with a clean data object */ - crm_xml_add_int(data, "state", op->state); - crm_xml_add(data, F_STONITH_TARGET, op->target); - crm_xml_add(data, F_STONITH_OPERATION, op->action); + crm_xml_add_int(data, PCMK_XA_STATE, op->state); + crm_xml_add(data, PCMK__XA_ST_TARGET, op->target); 
+ crm_xml_add(data, PCMK__XA_ST_OP, op->action); reply = fenced_construct_reply(op->request, data, &op->result); - crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate); + crm_xml_add(reply, PCMK__XA_ST_DELEGATE, op->delegate); /* Send fencing OP reply to local client that initiated fencing */ client = pcmk__find_client_by_id(op->client_id); @@ -463,10 +464,11 @@ handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data) } /* bcast to all local clients that the fencing operation happened */ - notify_data = fencing_result2xml(op); - fenced_send_notification(T_STONITH_NOTIFY_FENCE, &op->result, notify_data); + notify_data = fencing_result2xml(NULL, op); + fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, &op->result, + notify_data); free_xml(notify_data); - fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL); + fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); /* mark this op as having notifications already sent */ op->notify_sent = TRUE; @@ -509,12 +511,13 @@ finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data) static char * delegate_from_xml(xmlNode *xml) { - xmlNode *match = get_xpath_object("//@" F_STONITH_DELEGATE, xml, LOG_NEVER); + xmlNode *match = get_xpath_object("//@" PCMK__XA_ST_DELEGATE, xml, + LOG_NEVER); if (match == NULL) { - return crm_element_value_copy(xml, F_ORIG); + return crm_element_value_copy(xml, PCMK__XA_SRC); } else { - return crm_element_value_copy(match, F_STONITH_DELEGATE); + return crm_element_value_copy(match, PCMK__XA_ST_DELEGATE); } } @@ -564,7 +567,7 @@ finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup) undo_op_remap(op); if (data == NULL) { - data = create_xml_node(NULL, "remote-op"); + data = pcmk__xe_create(NULL, "remote-op"); local_data = data; } else if (op->delegate == NULL) { @@ -584,15 +587,15 @@ } } - if (dup || (crm_element_value(data, F_STONITH_MERGED) != NULL)) { + if (dup || (crm_element_value(data, PCMK__XA_ST_OP_MERGED) != NULL)) { op_merged = true; } /* Tell everyone the operation is done, we will continue * with doing the local notifications once we receive * the broadcast back.
*/ - subt = crm_element_value(data, F_SUBTYPE); - if (!dup && !pcmk__str_eq(subt, "broadcast", pcmk__str_casei)) { + subt = crm_element_value(data, PCMK__XA_SUBT); + if (!dup && !pcmk__str_eq(subt, PCMK__VALUE_BROADCAST, pcmk__str_none)) { /* Defer notification until the bcast message arrives */ fenced_broadcast_op_result(op, op_merged); free_xml(local_data); @@ -800,7 +803,8 @@ add_required_device(remote_fencing_op_t *op, const char *device) sort_strings); if (!match) { - op->automatic_list = g_list_prepend(op->automatic_list, strdup(device)); + op->automatic_list = g_list_prepend(op->automatic_list, + pcmk__str_copy(device)); } } @@ -833,7 +837,10 @@ set_op_device_list(remote_fencing_op_t * op, GList *devices) op->devices_list = NULL; } for (lpc = devices; lpc != NULL; lpc = lpc->next) { - op->devices_list = g_list_append(op->devices_list, strdup(lpc->data)); + const char *device = lpc->data; + + op->devices_list = g_list_append(op->devices_list, + pcmk__str_copy(device)); } op->devices = op->devices_list; } @@ -1001,6 +1008,7 @@ merge_duplicates(remote_fencing_op_t *op) g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) { const char *other_action = op_requested_action(other); + crm_node_t *node = NULL; if (!strcmp(op->id, other->id)) { continue; // Don't compare against self @@ -1030,7 +1038,11 @@ merge_duplicates(remote_fencing_op_t *op) op->id, other->id, other->target); continue; } - if (!fencing_peer_active(crm_get_peer(0, other->originator))) { + + node = pcmk__get_node(0, other->originator, NULL, + pcmk__node_search_cluster_member); + + if (!fencing_peer_active(node)) { crm_notice("Failing action '%s' targeting %s originating from " "client %s@%s: Originator is dead " CRM_XS " id=%.8s", other->action, other->target, other->client_name, @@ -1042,8 +1054,8 @@ merge_duplicates(remote_fencing_op_t *op) } if ((other->total_timeout > 0) && (now > (other->total_timeout + other->created))) { - crm_trace("%.8s not duplicate of %.8s: old (%ld vs. %ld + %d)", - op->id, other->id, now, other->created, + crm_trace("%.8s not duplicate of %.8s: old (%lld vs. 
%lld + %ds)", + op->id, other->id, (long long)now, (long long)other->created, other->total_timeout); continue; } @@ -1055,7 +1067,7 @@ merge_duplicates(remote_fencing_op_t *op) if (other->total_timeout == 0) { other->total_timeout = op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL); - crm_trace("Best guess as to timeout used for %.8s: %d", + crm_trace("Best guess as to timeout used for %.8s: %ds", other->id, other->total_timeout); } crm_notice("Merging fencing action '%s' targeting %s originating from " @@ -1097,12 +1109,12 @@ int fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg) { remote_fencing_op_t *op = NULL; - xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR); + xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR); CRM_CHECK(dev != NULL, return EPROTO); crm_notice("Received manual confirmation that %s has been fenced", - pcmk__s(crm_element_value(dev, F_STONITH_TARGET), + pcmk__s(crm_element_value(dev, PCMK__XA_ST_TARGET), "unknown target")); op = initiate_remote_stonith_op(client, msg, TRUE); if (op == NULL) { @@ -1110,7 +1122,7 @@ fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg) } op->state = st_done; set_fencing_completed(op); - op->delegate = strdup("a human"); + op->delegate = pcmk__str_copy("a human"); // For the fencer's purposes, the fencing operation is done pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); @@ -1137,7 +1149,8 @@ void * create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) { remote_fencing_op_t *op = NULL; - xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_NEVER); + xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request, + LOG_NEVER); int call_options = 0; const char *operation = NULL; @@ -1146,7 +1159,7 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) /* If this operation is owned by another node, check to make * sure we haven't already created this operation. 
*/ if (peer && dev) { - const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID); + const char *op_id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(op_id != NULL, return NULL); @@ -1158,15 +1171,14 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) } } - op = calloc(1, sizeof(remote_fencing_op_t)); - CRM_ASSERT(op != NULL); + op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); - crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout)); + crm_element_value_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout)); // Value -1 means disable any static/random fencing delays - crm_element_value_int(request, F_STONITH_DELAY, &(op->client_delay)); + crm_element_value_int(request, PCMK__XA_ST_DELAY, &(op->client_delay)); if (peer && dev) { - op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID); + op->id = crm_element_value_copy(dev, PCMK__XA_ST_REMOTE_OP); } else { op->id = crm_generate_uuid(); } @@ -1175,41 +1187,49 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) op->state = st_query; op->replies_expected = fencing_active_peers(); - op->action = crm_element_value_copy(dev, F_STONITH_ACTION); - op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN); - op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */ - op->created = time(NULL); + op->action = crm_element_value_copy(dev, PCMK__XA_ST_DEVICE_ACTION); + /* The node initiating the stonith operation. If an operation is relayed, + * this is the last node the operation lands on. When in standalone mode, + * origin is the ID of the client that originated the operation. + * + * Or may be the name of the function that created the operation. + */ + op->originator = crm_element_value_copy(dev, PCMK__XA_ST_ORIGIN); if (op->originator == NULL) { /* Local or relayed request */ - op->originator = strdup(stonith_our_uname); + op->originator = pcmk__str_copy(stonith_our_uname); } - CRM_LOG_ASSERT(client != NULL); - if (client) { - op->client_id = strdup(client); - } + // Delegate may not be set + op->delegate = crm_element_value_copy(dev, PCMK__XA_ST_DELEGATE); + op->created = time(NULL); + CRM_LOG_ASSERT(client != NULL); + op->client_id = pcmk__str_copy(client); /* For a RELAY operation, set fenced on the client. 
*/ - operation = crm_element_value(request, F_STONITH_OPERATION); + operation = crm_element_value(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { op->client_name = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } else { - op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME); + op->client_name = crm_element_value_copy(request, + PCMK__XA_ST_CLIENTNAME); } - op->target = crm_element_value_copy(dev, F_STONITH_TARGET); - op->request = copy_xml(request); /* TODO: Figure out how to avoid this */ - crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); + op->target = crm_element_value_copy(dev, PCMK__XA_ST_TARGET); + + // @TODO Figure out how to avoid copying XML here + op->request = pcmk__xml_copy(NULL, request); + crm_element_value_int(request, PCMK__XA_ST_CALLOPT, &call_options); op->call_options = call_options; - crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid)); + crm_element_value_int(request, PCMK__XA_ST_CALLID, &(op->client_callid)); crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, " - "base timeout %d, %u %s expected)", + "base timeout %ds, %u %s expected)", (peer && dev)? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name, op->base_timeout, op->replies_expected, @@ -1220,14 +1240,15 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) crm_node_t *node; pcmk__scan_min_int(op->target, &nodeid, 0); - node = pcmk__search_known_node_cache(nodeid, NULL, CRM_GET_PEER_ANY); + node = pcmk__search_node_caches(nodeid, NULL, + pcmk__node_search_any + |pcmk__node_search_cluster_cib); /* Ensure the conversion only happens once */ stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid); if (node && node->uname) { - free(op->target); - op->target = strdup(node->uname); + pcmk__str_update(&(op->target), node->uname); } else { crm_warn("Could not expand nodeid '%s' into a host name", op->target); @@ -1239,7 +1260,7 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) if (op->state != st_duplicate) { /* kick history readers */ - fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL); + fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } /* safe to trim as long as that doesn't touch pending ops */ @@ -1272,7 +1293,7 @@ initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, if (client) { client_id = client->id; } else { - client_id = crm_element_value(request, F_STONITH_CLIENTID); + client_id = crm_element_value(request, PCMK__XA_ST_CLIENTID); } CRM_LOG_ASSERT(client_id != NULL); @@ -1305,7 +1326,7 @@ initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, default: crm_notice("Requesting peer fencing (%s) targeting %s " - CRM_XS " id=%.8s state=%s base_timeout=%d", + CRM_XS " id=%.8s state=%s base_timeout=%ds", op->action, op->target, op->id, stonith_op_state_str(op->state), op->base_timeout); } @@ -1313,24 +1334,24 @@ initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, NULL, op->call_options); - crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id); - crm_xml_add(query, F_STONITH_TARGET, op->target); - crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op)); - crm_xml_add(query, F_STONITH_ORIGIN, op->originator); - crm_xml_add(query, F_STONITH_CLIENTID, op->client_id); - crm_xml_add(query, F_STONITH_CLIENTNAME, 
op->client_name); - crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout); + crm_xml_add(query, PCMK__XA_ST_REMOTE_OP, op->id); + crm_xml_add(query, PCMK__XA_ST_TARGET, op->target); + crm_xml_add(query, PCMK__XA_ST_DEVICE_ACTION, op_requested_action(op)); + crm_xml_add(query, PCMK__XA_ST_ORIGIN, op->originator); + crm_xml_add(query, PCMK__XA_ST_CLIENTID, op->client_id); + crm_xml_add(query, PCMK__XA_ST_CLIENTNAME, op->client_name); + crm_xml_add_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout); /* For a RELAY operation, the RELAY information is added to the query so that the original RELAY operation can be deleted. */ - operation = crm_element_value(request, F_STONITH_OPERATION); + operation = crm_element_value(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { - relay_op_id = crm_element_value(request, F_STONITH_REMOTE_OP_ID); + relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP); if (relay_op_id) { - crm_xml_add(query, F_STONITH_REMOTE_OP_ID_RELAY, relay_op_id); + crm_xml_add(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id); } } - send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, query); free_xml(query); query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR; @@ -1348,6 +1369,16 @@ enum find_best_peer_options { FIND_PEER_VERIFIED_ONLY = 0x0004, }; +static bool +is_watchdog_fencing(const remote_fencing_op_t *op, const char *device) +{ + return (stonith_watchdog_timeout_ms > 0 + // Only an explicit mismatch rules out watchdog fencing. + && pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_null_matches) + && pcmk__is_fencing_action(op->action) + && node_does_watchdog_fencing(op->target)); +} + static peer_device_info_t * find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options) { @@ -1443,10 +1474,10 @@ stonith_choose_peer(remote_fencing_op_t * op) && pcmk_is_set(op->call_options, st_opt_topology) && (advance_topology_level(op, false) == pcmk_rc_ok)); - if ((stonith_watchdog_timeout_ms > 0) - && pcmk__is_fencing_action(op->action) - && pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_none) - && node_does_watchdog_fencing(op->target)) { + /* With a simple watchdog fencing configuration without a topology, + * "device" is NULL here. Assume watchdog fencing should be used. + */ + if (is_watchdog_fencing(op, device)) { crm_info("Couldn't contact watchdog-fencing target-node (%s)", op->target); /* check_watchdog_fencing_and_wait will log additional info */ @@ -1457,33 +1488,70 @@ return NULL; } +static int +valid_fencing_timeout(int specified_timeout, bool action_specific, + const remote_fencing_op_t *op, const char *device) +{ + int timeout = specified_timeout; + + if (!is_watchdog_fencing(op, device)) { + return timeout; + } + + timeout = (int) QB_MIN(QB_MAX(specified_timeout, + stonith_watchdog_timeout_ms / 1000), INT_MAX); + + if (timeout > specified_timeout) { + if (action_specific) { + crm_warn("pcmk_%s_timeout %ds for %s is too short (must be >= " + PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " + "instead", + op->action, specified_timeout, device?
device : "watchdog", + timeout, timeout); + + } else { + crm_warn("Fencing timeout %ds is too short (must be >= " + PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " + "instead", + specified_timeout, timeout, timeout); + } + } + + return timeout; +} + static int get_device_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, bool with_delay) { + int timeout = op->base_timeout; device_properties_t *props; - int delay = 0; + + timeout = valid_fencing_timeout(op->base_timeout, false, op, device); if (!peer || !device) { - return op->base_timeout; + return timeout; } props = g_hash_table_lookup(peer->devices, device); if (!props) { - return op->base_timeout; + return timeout; + } + + if (props->custom_action_timeout[op->phase]) { + timeout = valid_fencing_timeout(props->custom_action_timeout[op->phase], + true, op, device); } // op->client_delay < 0 means disable any static/random fencing delays if (with_delay && (op->client_delay >= 0)) { // delay_base is eventually limited by delay_max - delay = (props->delay_max[op->phase] > 0 ? - props->delay_max[op->phase] : props->delay_base[op->phase]); + timeout += (props->delay_max[op->phase] > 0 ? + props->delay_max[op->phase] : props->delay_base[op->phase]); } - return (props->custom_action_timeout[op->phase]? - props->custom_action_timeout[op->phase] : op->base_timeout) - + delay; + return timeout; } struct timeout_data { @@ -1532,7 +1600,7 @@ static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer) { - int total_timeout = 0; + long long total_timeout = 0; stonith_topology_t *tp = find_topology_for_host(op->target); if (pcmk_is_set(op->call_options, st_opt_topology) && tp) { @@ -1558,17 +1626,7 @@ get_op_total_timeout(const remote_fencing_op_t *op, continue; } for (device_list = tp->levels[i]; device_list; device_list = device_list->next) { - /* in case of watchdog-device we add the timeout to the budget - regardless of if we got a reply or not - */ - if ((stonith_watchdog_timeout_ms > 0) - && pcmk__is_fencing_action(op->action) - && pcmk__str_eq(device_list->data, STONITH_WATCHDOG_ID, - pcmk__str_none) - && node_does_watchdog_fencing(op->target)) { - total_timeout += stonith_watchdog_timeout_ms / 1000; - continue; - } + bool found = false; for (iter = op->query_results; iter != NULL; iter = iter->next) { const peer_device_info_t *peer = iter->data; @@ -1586,9 +1644,17 @@ get_op_total_timeout(const remote_fencing_op_t *op, total_timeout += get_device_timeout(op, peer, device_list->data, true); + found = true; break; } } /* End Loop3: match device with peer that owns device, find device's timeout period */ + + /* In case of a watchdog device, add the timeout to the budget + if we didn't get a reply + */ + if (!found && is_watchdog_fencing(op, device_list->data)) { + total_timeout += stonith_watchdog_timeout_ms / 1000; + } } /* End Loop2: iterate through devices at a specific level */ } /* End Loop1: iterate through fencing levels */ @@ -1612,15 +1678,23 @@ get_op_total_timeout(const remote_fencing_op_t *op, } else if (chosen_peer) { total_timeout = get_peer_timeout(op, chosen_peer); + } else { + total_timeout = valid_fencing_timeout(op->base_timeout, false, op, + NULL); + } + + if (total_timeout <= 0) { total_timeout = op->base_timeout; } /* Take any requested fencing delay into account to prevent it from eating * up the total timeout. */ - return ((total_timeout ? total_timeout : op->base_timeout) - + ((op->client_delay > 0)?
op->client_delay : 0)); + if (op->client_delay > 0) { + total_timeout += op->client_delay; + } + return (int) QB_MIN(total_timeout, INT_MAX); } static void @@ -1643,9 +1717,9 @@ report_timeout_period(remote_fencing_op_t * op, int op_timeout) } crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id); - client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE); - call_id = crm_element_value(op->request, F_STONITH_CALLID); - client_id = crm_element_value(op->request, F_STONITH_CLIENTID); + client_node = crm_element_value(op->request, PCMK__XA_ST_CLIENTNODE); + call_id = crm_element_value(op->request, PCMK__XA_ST_CALLID); + client_id = crm_element_value(op->request, PCMK__XA_ST_CLIENTID); if (!client_node || !call_id || !client_id) { return; } @@ -1658,12 +1732,14 @@ report_timeout_period(remote_fencing_op_t * op, int op_timeout) /* The client is connected to another node, relay this update to them */ update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0); - crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id); - crm_xml_add(update, F_STONITH_CLIENTID, client_id); - crm_xml_add(update, F_STONITH_CALLID, call_id); - crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout); + crm_xml_add(update, PCMK__XA_ST_REMOTE_OP, op->id); + crm_xml_add(update, PCMK__XA_ST_CLIENTID, client_id); + crm_xml_add(update, PCMK__XA_ST_CALLID, call_id); + crm_xml_add_int(update, PCMK__XA_ST_TIMEOUT, op_timeout); - send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE); + pcmk__cluster_send_message(pcmk__get_node(0, client_node, NULL, + pcmk__node_search_cluster_member), + crm_msg_stonith_ng, update); free_xml(update); @@ -1742,17 +1818,18 @@ static gboolean check_watchdog_fencing_and_wait(remote_fencing_op_t * op) { if (node_does_watchdog_fencing(op->target)) { + guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX); - crm_notice("Waiting %lds for %s to self-fence (%s) for " + crm_notice("Waiting %s for %s to self-fence (%s) for " "client %s " CRM_XS " id=%.8s", - (stonith_watchdog_timeout_ms / 1000), - op->target, op->action, op->client_name, op->id); + pcmk__readable_interval(timeout_ms), op->target, op->action, + op->client_name, op->id); if (op->op_timer_one) { g_source_remove(op->op_timer_one); } - op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, - remote_op_watchdog_done, op); + op->op_timer_one = g_timeout_add(timeout_ms, remote_op_watchdog_done, + op); return TRUE; } else { crm_debug("Skipping fallback to watchdog-fencing as %s is " @@ -1819,7 +1896,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer); op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op); report_timeout_period(op, op->total_timeout); - crm_info("Total timeout set to %d for peer's fencing targeting %s for %s" + crm_info("Total timeout set to %ds for peer's fencing targeting %s for %s" CRM_XS "id=%.8s", op->total_timeout, op->target, op->client_name, op->id); } @@ -1846,6 +1923,9 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) if (peer) { int timeout_one = 0; xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0); + const crm_node_t *peer_node = + pcmk__get_node(0, peer->host, NULL, + pcmk__node_search_cluster_member); if (op->client_delay > 0) { /* Take requested fencing delay into account to prevent it from @@ -1854,15 +1934,15 @@ 
request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay; } - crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id); - crm_xml_add(remote_op, F_STONITH_TARGET, op->target); - crm_xml_add(remote_op, F_STONITH_ACTION, op->action); - crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator); - crm_xml_add(remote_op, F_STONITH_CLIENTID, op->client_id); - crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name); - crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout); - crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options); - crm_xml_add_int(remote_op, F_STONITH_DELAY, op->client_delay); + crm_xml_add(remote_op, PCMK__XA_ST_REMOTE_OP, op->id); + crm_xml_add(remote_op, PCMK__XA_ST_TARGET, op->target); + crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ACTION, op->action); + crm_xml_add(remote_op, PCMK__XA_ST_ORIGIN, op->originator); + crm_xml_add(remote_op, PCMK__XA_ST_CLIENTID, op->client_id); + crm_xml_add(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name); + crm_xml_add_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout); + crm_xml_add_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options); + crm_xml_add_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay); if (device) { timeout_one += TIMEOUT_MULTIPLY_FACTOR * @@ -1871,14 +1951,15 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) "using %s " CRM_XS " for client %s (%ds)", peer->host, op->action, op->target, device, op->client_name, timeout_one); - crm_xml_add(remote_op, F_STONITH_DEVICE, device); + crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ID, device); } else { timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer); crm_notice("Requesting that %s perform '%s' action targeting %s " - CRM_XS " for client %s (%ds, %lds)", + CRM_XS " for client %s (%ds, %s)", peer->host, op->action, op->target, op->client_name, - timeout_one, stonith_watchdog_timeout_ms); + timeout_one, + pcmk__readable_interval(stonith_watchdog_timeout_ms)); } op->state = st_exec; @@ -1887,11 +1968,8 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) op->op_timer_one = 0; } - if (!((stonith_watchdog_timeout_ms > 0) - && (pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_none) - || (pcmk__str_eq(peer->host, op->target, pcmk__str_casei) - && pcmk__is_fencing_action(op->action))) - && check_watchdog_fencing_and_wait(op))) { + if (!is_watchdog_fencing(op, device) + || !check_watchdog_fencing_and_wait(op)) { /* Some thoughts about self-fencing cases reaching this point: - Actually check in check_watchdog_fencing_and_wait @@ -1907,8 +1985,8 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) enabled for a node but the watchdog-fencing-device isn't explicitly chosen for suicide. Local pe-execution in sbd may detect the node as unclean and lead to timely suicide. - Otherwise the selection of stonith-watchdog-timeout at - least is questionable. + Otherwise the selection of PCMK_OPT_STONITH_WATCHDOG_TIMEOUT + at least is questionable. 
*/ /* coming here we're not waiting for watchdog timeout - @@ -1916,7 +1994,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op); } - send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE); + pcmk__cluster_send_message(peer_node, crm_msg_stonith_ng, remote_op); peer->tried = TRUE; free_xml(remote_op); return; @@ -1948,11 +2026,15 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) * but we have all the expected replies, then no devices * are available to execute the fencing operation. */ - if(stonith_watchdog_timeout_ms > 0 && pcmk__str_eq(device, - STONITH_WATCHDOG_ID, pcmk__str_null_matches)) { - if (check_watchdog_fencing_and_wait(op)) { - return; - } + if (is_watchdog_fencing(op, device) + && check_watchdog_fencing_and_wait(op)) { + /* Consider a watchdog fencing operation targeting an offline node + * to be executing once it starts waiting for the target to + * self-fence, so that when the query timer pops, + * remote_op_query_timeout() considers the fencing already in progress. + */ + op->state = st_exec; + return; } if (op->state == st_query) { @@ -2078,24 +2160,25 @@ parse_action_specific(const xmlNode *xml, const char *peer, const char *device, enum st_remap_phase phase, device_properties_t *props) { props->custom_action_timeout[phase] = 0; - crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT, + crm_element_value_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, &props->custom_action_timeout[phase]); if (props->custom_action_timeout[phase]) { - crm_trace("Peer %s with device %s returned %s action timeout %d", + crm_trace("Peer %s with device %s returned %s action timeout %ds", peer, device, action, props->custom_action_timeout[phase]); } props->delay_max[phase] = 0; - crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]); + crm_element_value_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]); if (props->delay_max[phase]) { - crm_trace("Peer %s with device %s returned maximum of random delay %d for %s", + crm_trace("Peer %s with device %s returned maximum of random delay %ds for %s", peer, device, props->delay_max[phase], action); } props->delay_base[phase] = 0; - crm_element_value_int(xml, F_STONITH_DELAY_BASE, &props->delay_base[phase]); + crm_element_value_int(xml, PCMK__XA_ST_DELAY_BASE, + &props->delay_base[phase]); if (props->delay_base[phase]) { - crm_trace("Peer %s with device %s returned base delay %d for %s", + crm_trace("Peer %s with device %s returned base delay %ds for %s", peer, device, props->delay_base[phase], action); } @@ -2103,7 +2186,7 @@ parse_action_specific(const xmlNode *xml, const char *peer, const char *device, if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { int required = 0; - crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required); + crm_element_value_int(xml, PCMK__XA_ST_REQUIRED, &required); if (required) { crm_trace("Peer %s requires device %s to execute for action %s", peer, device, action); @@ -2114,7 +2197,7 @@ parse_action_specific(const xmlNode *xml, const char *peer, const char *device, /* If a reboot is remapped to off+on, it's possible that a node is allowed * to perform one action but not another.
*/ - if (pcmk__xe_attr_is_true(xml, F_STONITH_ACTION_DISALLOWED)) { + if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) { props->disallowed[phase] = TRUE; crm_trace("Peer %s is disallowed from executing %s for device %s", peer, action, device); @@ -2136,37 +2219,39 @@ add_device_properties(const xmlNode *xml, remote_fencing_op_t *op, { xmlNode *child; int verified = 0; - device_properties_t *props = calloc(1, sizeof(device_properties_t)); + device_properties_t *props = + pcmk__assert_alloc(1, sizeof(device_properties_t)); int flags = st_device_supports_on; /* Old nodes that don't set the flag assume they support the on action */ /* Add a new entry to this peer's devices list */ - CRM_ASSERT(props != NULL); - g_hash_table_insert(peer->devices, strdup(device), props); + g_hash_table_insert(peer->devices, pcmk__str_copy(device), props); /* Peers with verified (monitored) access will be preferred */ - crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified); + crm_element_value_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified); if (verified) { crm_trace("Peer %s has confirmed a verified device %s", peer->host, device); props->verified = TRUE; } - crm_element_value_int(xml, F_STONITH_DEVICE_SUPPORT_FLAGS, &flags); + crm_element_value_int(xml, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, &flags); props->device_support_flags = flags; /* Parse action-specific device properties */ parse_action_specific(xml, peer->host, device, op_requested_action(op), op, st_phase_requested, props); - for (child = pcmk__xml_first_child(xml); child != NULL; - child = pcmk__xml_next(child)) { + for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; + child = pcmk__xe_next(child)) { /* Replies for "reboot" operations will include the action-specific * values for "off" and "on" in child elements, just in case the reboot * winds up getting remapped. 
*/ - if (pcmk__str_eq(ID(child), PCMK_ACTION_OFF, pcmk__str_none)) { + if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF, op, st_phase_off, props); - } else if (pcmk__str_eq(ID(child), PCMK_ACTION_ON, pcmk__str_none)) { + + } else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON, + pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_ON, op, st_phase_on, props); } @@ -2188,19 +2273,17 @@ static peer_device_info_t * add_result(remote_fencing_op_t *op, const char *host, int ndevices, const xmlNode *xml) { - peer_device_info_t *peer = calloc(1, sizeof(peer_device_info_t)); + peer_device_info_t *peer = pcmk__assert_alloc(1, + sizeof(peer_device_info_t)); xmlNode *child; - // cppcheck seems not to understand the abort logic in CRM_CHECK - // cppcheck-suppress memleak - CRM_CHECK(peer != NULL, return NULL); - peer->host = strdup(host); + peer->host = pcmk__str_copy(host); peer->devices = pcmk__strkey_table(free, free); /* Each child element describes one capable device available to the peer */ - for (child = pcmk__xml_first_child(xml); child != NULL; - child = pcmk__xml_next(child)) { - const char *device = ID(child); + for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; + child = pcmk__xe_next(child)) { + const char *device = pcmk__xe_id(child); if (device) { add_device_properties(child, op, peer, device); @@ -2241,16 +2324,16 @@ process_remote_stonith_query(xmlNode *msg) remote_fencing_op_t *op = NULL; peer_device_info_t *peer = NULL; uint32_t replies_expected; - xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR); + xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); - id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID); + id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return -EPROTO); - dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR); + dev = get_xpath_object("//@" PCMK__XA_ST_AVAILABLE_DEVICES, msg, LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); - crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices); + crm_element_value_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices); op = g_hash_table_lookup(stonith_remote_op_list, id); if (op == NULL) { @@ -2266,7 +2349,7 @@ process_remote_stonith_query(xmlNode *msg) if ((++op->replies >= replies_expected) && (op->state == st_query)) { have_all_replies = TRUE; } - host = crm_element_value(msg, F_ORIG); + host = crm_element_value(msg, PCMK__XA_SRC); host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei); crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s", @@ -2339,12 +2422,12 @@ fenced_process_fencing_reply(xmlNode *msg) const char *id = NULL; const char *device = NULL; remote_fencing_op_t *op = NULL; - xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR); + xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR); pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(dev != NULL, return); - id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID); + id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return); dev = stonith__find_xe_with_result(msg); @@ -2352,7 +2435,7 @@ fenced_process_fencing_reply(xmlNode *msg) stonith__xe_get_result(dev, &result); - device = crm_element_value(dev, F_STONITH_DEVICE); + device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID); if 
(stonith_remote_op_list) { op = g_hash_table_lookup(stonith_remote_op_list, id); @@ -2360,7 +2443,7 @@ fenced_process_fencing_reply(xmlNode *msg) if ((op == NULL) && pcmk__result_ok(&result)) { /* Record successful fencing operations */ - const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID); + const char *client_id = crm_element_value(dev, PCMK__XA_ST_CLIENTID); op = create_remote_stonith_op(client_id, dev, TRUE); } @@ -2383,7 +2466,9 @@ fenced_process_fencing_reply(xmlNode *msg) return; } - if (pcmk__str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast", pcmk__str_casei)) { + if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT), + PCMK__VALUE_BROADCAST, pcmk__str_none)) { + if (pcmk__result_ok(&op->result)) { op->state = st_done; } else { @@ -2412,7 +2497,7 @@ fenced_process_fencing_reply(xmlNode *msg) return; } - device = crm_element_value(msg, F_STONITH_DEVICE); + device = crm_element_value(msg, PCMK__XA_ST_DEVICE_ID); if ((op->phase == 2) && !pcmk__result_ok(&op->result)) { /* A remapped "on" failed, but the node was already turned off diff --git a/daemons/fenced/fenced_scheduler.c b/daemons/fenced/fenced_scheduler.c index 27d990f..69e16fa 100644 --- a/daemons/fenced/fenced_scheduler.c +++ b/daemons/fenced/fenced_scheduler.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -124,14 +124,14 @@ register_if_fencing_device(gpointer data, gpointer user_data) if (rsc->children != NULL) { for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { register_if_fencing_device(iter->data, NULL); - if (pe_rsc_is_clone(rsc)) { + if (pcmk__is_clone(rsc)) { return; // Only one instance needs to be checked for clones } } return; } - rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); + rclass = crm_element_value(rsc->xml, PCMK_XA_CLASS); if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { return; // Not a fencing device } @@ -163,8 +163,7 @@ register_if_fencing_device(gpointer data, gpointer user_data) } // If device is in a group, check whether local node is allowed for group - if ((rsc->parent != NULL) - && (rsc->parent->variant == pcmk_rsc_variant_group)) { + if (pcmk__is_group(rsc->parent)) { pcmk_node_t *group_node = local_node_allowed_for(rsc->parent); if ((group_node != NULL) && (group_node->weight < 0)) { @@ -177,8 +176,12 @@ register_if_fencing_device(gpointer data, gpointer user_data) crm_debug("Reloading configuration of fencing device %s", rsc->id); - agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE); + agent = crm_element_value(rsc->xml, PCMK_XA_TYPE); + /* @COMPAT Support for node attribute expressions in rules for resource + * meta-attributes is deprecated. When we can break behavioral backward + * compatibility, replace node with NULL here. + */ get_meta_attributes(rsc->meta, rsc, node, scheduler); rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c index 7c69fb8..5ba97b5 100644 --- a/daemons/fenced/pacemaker-fenced.c +++ b/daemons/fenced/pacemaker-fenced.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* @@ -22,7 +22,6 @@ #include // PRIu32, PRIx32 #include -#include #include #include #include @@ -42,7 +41,7 @@ #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster" char *stonith_our_uname = NULL; -long stonith_watchdog_timeout_ms = 0; +long long stonith_watchdog_timeout_ms = 0; GList *stonith_watchdog_targets = NULL; static GMainLoop *mainloop = NULL; @@ -75,11 +74,11 @@ st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) if (stonith_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", pcmk__client_pid(c)); - return -EPERM; + return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -102,34 +101,31 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) request = pcmk__client_data2xml(c, data, &id, &flags); if (request == NULL) { - pcmk__ipc_send_ack(c, id, flags, "nack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(c, id, flags, PCMK__XE_NACK, NULL, CRM_EX_PROTOCOL); return 0; } - op = crm_element_value(request, F_CRM_TASK); + op = crm_element_value(request, PCMK__XA_CRM_TASK); if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) { - crm_xml_add(request, F_TYPE, T_STONITH_NG); - crm_xml_add(request, F_STONITH_OPERATION, op); - crm_xml_add(request, F_STONITH_CLIENTID, c->id); - crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); - crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); + crm_xml_add(request, PCMK__XA_T, PCMK__VALUE_STONITH_NG); + crm_xml_add(request, PCMK__XA_ST_OP, op); + crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id); + crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c)); + crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, stonith_our_uname); - send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, request); free_xml(request); return 0; } if (c->name == NULL) { - const char *value = crm_element_value(request, F_STONITH_CLIENTNAME); + const char *value = crm_element_value(request, PCMK__XA_ST_CLIENTNAME); - if (value == NULL) { - value = "unknown"; - } - c->name = crm_strdup_printf("%s.%u", value, c->pid); + c->name = crm_strdup_printf("%s.%u", pcmk__s(value, "unknown"), c->pid); } - crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); + crm_element_value_int(request, PCMK__XA_ST_CALLOPT, &call_options); crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32 " from client %s", flags, call_options, id, pcmk__client_name(c)); @@ -139,9 +135,9 @@ st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) c->request_id = id; /* Reply only to the last one */ } - crm_xml_add(request, F_STONITH_CLIENTID, c->id); - crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); - crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); + crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id); + crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c)); + crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, stonith_our_uname); crm_log_xml_trace(request, "ipc-received"); stonith_command(c, id, flags, request, NULL); @@ -177,10 +173,10 @@ st_ipc_destroy(qb_ipcs_connection_t * c) static void stonith_peer_callback(xmlNode * msg, void *private_data) { - const char *remote_peer = crm_element_value(msg, F_ORIG); - const char *op = crm_element_value(msg, F_STONITH_OPERATION); + const char *remote_peer = crm_element_value(msg, PCMK__XA_SRC); + const char *op = crm_element_value(msg, PCMK__XA_ST_OP); - if (pcmk__str_eq(op, 
"poke", pcmk__str_none)) { + if (pcmk__str_eq(op, STONITH_OP_POKE, pcmk__str_none)) { return; } @@ -197,20 +193,19 @@ stonith_peer_ais_callback(cpg_handle_t handle, uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; - char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); + char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { - xml = string2xml(data); + xml = pcmk__xml_parse(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } - crm_xml_add(xml, F_ORIG, from); - /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */ + crm_xml_add(xml, PCMK__XA_SRC, from); stonith_peer_callback(xml, NULL); } @@ -257,7 +252,7 @@ do_local_reply(const xmlNode *notify_src, pcmk__client_t *client, uint64_t get_stonith_flag(const char *name) { - if (pcmk__str_eq(name, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) { + if (pcmk__str_eq(name, PCMK__VALUE_ST_NOTIFY_FENCE, pcmk__str_none)) { return st_callback_notify_fence; } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_ADD, pcmk__str_casei)) { @@ -266,10 +261,12 @@ get_stonith_flag(const char *name) } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_DEL, pcmk__str_casei)) { return st_callback_device_del; - } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY, pcmk__str_casei)) { + } else if (pcmk__str_eq(name, PCMK__VALUE_ST_NOTIFY_HISTORY, + pcmk__str_none)) { return st_callback_notify_history; - } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED, pcmk__str_casei)) { + } else if (pcmk__str_eq(name, PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, + pcmk__str_none)) { return st_callback_notify_history_synced; } @@ -287,7 +284,7 @@ stonith_notify_client(gpointer key, gpointer value, gpointer user_data) CRM_CHECK(client != NULL, return); CRM_CHECK(update_msg != NULL, return); - type = crm_element_value(update_msg, F_SUBTYPE); + type = crm_element_value(update_msg, PCMK__XA_SUBT); CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return); if (client->ipcs == NULL) { @@ -325,10 +322,10 @@ do_stonith_async_timeout_update(const char *client_id, const char *call_id, int return; } - notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE); - crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE); - crm_xml_add(notify_data, F_STONITH_CALLID, call_id); - crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout); + notify_data = pcmk__xe_create(NULL, PCMK__XE_ST_ASYNC_TIMEOUT_VALUE); + crm_xml_add(notify_data, PCMK__XA_T, PCMK__VALUE_ST_ASYNC_TIMEOUT_VALUE); + crm_xml_add(notify_data, PCMK__XA_ST_CALLID, call_id); + crm_xml_add_int(notify_data, PCMK__XA_ST_TIMEOUT, timeout); crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id); @@ -352,17 +349,19 @@ fenced_send_notification(const char *type, const pcmk__action_result_t *result, xmlNode *data) { /* TODO: Standardize the contents of data */ - xmlNode *update_msg = create_xml_node(NULL, "notify"); + xmlNode *update_msg = pcmk__xe_create(NULL, PCMK__XE_NOTIFY); CRM_LOG_ASSERT(type != NULL); - crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY); - crm_xml_add(update_msg, F_SUBTYPE, type); - crm_xml_add(update_msg, F_STONITH_OPERATION, type); + crm_xml_add(update_msg, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); + crm_xml_add(update_msg, PCMK__XA_SUBT, type); + crm_xml_add(update_msg, PCMK__XA_ST_OP, type); stonith__xe_set_result(update_msg, result); if (data != NULL) { - add_message_xml(update_msg, F_STONITH_CALLDATA, data); + xmlNode *wrapper 
= pcmk__xe_create(update_msg, PCMK__XE_ST_CALLDATA); + + pcmk__xml_copy(wrapper, data); } crm_trace("Notifying clients"); @@ -375,60 +374,25 @@ fenced_send_notification(const char *type, const pcmk__action_result_t *result, * \internal * \brief Send notifications for a configuration change to subscribed clients * - * \param[in] op Notification type (STONITH_OP_DEVICE_ADD, - * STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, or - * STONITH_OP_LEVEL_DEL) - * \param[in] result Operation result - * \param[in] desc Description of what changed - * \param[in] active Current number of devices or topologies in use - */ -static void -send_config_notification(const char *op, const pcmk__action_result_t *result, - const char *desc, int active) -{ - xmlNode *notify_data = create_xml_node(NULL, op); - - CRM_CHECK(notify_data != NULL, return); - - crm_xml_add(notify_data, F_STONITH_DEVICE, desc); - crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active); - - fenced_send_notification(op, result, notify_data); - free_xml(notify_data); -} - -/*! - * \internal - * \brief Send notifications for a device change to subscribed clients - * - * \param[in] op Notification type (STONITH_OP_DEVICE_ADD or - * STONITH_OP_DEVICE_DEL) + * \param[in] op Notification type (\c STONITH_OP_DEVICE_ADD, + * \c STONITH_OP_DEVICE_DEL, \c STONITH_OP_LEVEL_ADD, or + * \c STONITH_OP_LEVEL_DEL) * \param[in] result Operation result - * \param[in] desc ID of device that changed + * \param[in] desc Description of what changed (either device ID or string + * representation of level + * ([])) */ void -fenced_send_device_notification(const char *op, +fenced_send_config_notification(const char *op, const pcmk__action_result_t *result, const char *desc) { - send_config_notification(op, result, desc, g_hash_table_size(device_list)); -} + xmlNode *notify_data = pcmk__xe_create(NULL, op); -/*! - * \internal - * \brief Send notifications for a topology level change to subscribed clients - * - * \param[in] op Notification type (STONITH_OP_LEVEL_ADD or - * STONITH_OP_LEVEL_DEL) - * \param[in] result Operation result - * \param[in] desc String representation of level ([]) - */ -void -fenced_send_level_notification(const char *op, - const pcmk__action_result_t *result, - const char *desc) -{ - send_config_notification(op, result, desc, g_hash_table_size(topology)); + crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ID, desc); + + fenced_send_notification(op, result, notify_data); + free_xml(notify_data); } /*! 
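The hunks above replace the old add_message_xml() helper with an explicit wrapper pattern: create the wrapper element as a child with pcmk__xe_create(), then deep-copy the payload under it with pcmk__xml_copy(). A minimal sketch of that pattern follows, using only calls that appear in this patch; the include path and the standalone framing are assumptions for illustration, not part of the change itself.

    /* Sketch only (not part of the patch): composing a fencer notification
     * with the reworked XML helpers used throughout these hunks. Assumes the
     * pacemaker-internal header that declares pcmk__xe_create(),
     * pcmk__xml_copy(), crm_xml_add(), and the PCMK__* string constants.
     */
    #include <crm_internal.h>

    static xmlNode *
    build_notification(const char *type, xmlNode *data)
    {
        // NULL parent means a standalone element, as for PCMK__XE_ST_REPLY
        xmlNode *update_msg = pcmk__xe_create(NULL, PCMK__XE_NOTIFY);

        crm_xml_add(update_msg, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY);
        crm_xml_add(update_msg, PCMK__XA_SUBT, type);
        crm_xml_add(update_msg, PCMK__XA_ST_OP, type);

        if (data != NULL) {
            /* New pattern: create the calldata wrapper as a child, then
             * deep-copy the payload beneath it (replaces add_message_xml()).
             */
            xmlNode *wrapper = pcmk__xe_create(update_msg,
                                               PCMK__XE_ST_CALLDATA);

            pcmk__xml_copy(wrapper, data);
        }
        return update_msg; // caller frees with free_xml()
    }

This is also why fencing_result2xml() gained a parent argument earlier in the patch: callers such as fenced_broadcast_op_result() can now create the notification data directly under its PCMK__XE_ST_CALLDATA wrapper instead of copying it in afterward.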
@@ -466,7 +430,7 @@ stonith_cleanup(void) qb_ipcs_destroy(ipcs); } - crm_peer_destroy(); + pcmk__cluster_destroy_node_caches(); pcmk__client_cleanup(); free_stonith_remote_op_list(); free_topology_list(); @@ -512,221 +476,34 @@ st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void * This is a hack until we can send to a nodeid and/or we fix node name lookups * These messages are ignored in stonith_peer_callback() */ - xmlNode *query = create_xml_node(NULL, "stonith_command"); + xmlNode *query = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); - crm_xml_add(query, F_XML_TAGNAME, "stonith_command"); - crm_xml_add(query, F_TYPE, T_STONITH_NG); - crm_xml_add(query, F_STONITH_OPERATION, "poke"); + crm_xml_add(query, PCMK__XA_T, PCMK__VALUE_STONITH_NG); + crm_xml_add(query, PCMK__XA_ST_OP, STONITH_OP_POKE); crm_debug("Broadcasting our uname because of node %u", node->id); - send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); + pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, query); free_xml(query); } } -static pcmk__cluster_option_t fencer_options[] = { - /* name, old name, type, allowed values, - * default value, validator, - * short description, - * long description - */ - { - PCMK_STONITH_HOST_ARGUMENT, NULL, "string", NULL, "port", NULL, - N_("Advanced use only: An alternate parameter to supply instead of 'port'"), - N_("some devices do not support the " - "standard 'port' parameter or may provide additional ones. Use " - "this to specify an alternate, device-specific, parameter " - "that should indicate the machine to be fenced. A value of " - "none can be used to tell the cluster not to supply any " - "additional parameters.") - }, - { - PCMK_STONITH_HOST_MAP,NULL, "string", NULL, "", NULL, - N_("A mapping of host names to ports numbers for devices that do not support host names."), - N_("Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2") - }, - { - PCMK_STONITH_HOST_LIST,NULL, "string", NULL, "", NULL, - N_("Eg. node1,node2,node3"), - N_("A list of machines controlled by " - "this device (Optional unless pcmk_host_list=static-list)") - }, - { - PCMK_STONITH_HOST_CHECK,NULL, "string", NULL, "dynamic-list", NULL, - N_("How to determine which machines are controlled by the device."), - N_("Allowed values: dynamic-list " - "(query the device via the 'list' command), static-list " - "(check the pcmk_host_list attribute), status " - "(query the device via the 'status' command), " - "none (assume every device can fence every " - "machine)") - }, - { - PCMK_STONITH_DELAY_MAX,NULL, "time", NULL, "0s", NULL, - N_("Enable a base delay for fencing actions and specify base delay value."), - N_("Enable a delay of no more than the " - "time specified before executing fencing actions. Pacemaker " - "derives the overall delay by taking the value of " - "pcmk_delay_base and adding a random delay value such " - "that the sum is kept below this maximum.") - }, - { - PCMK_STONITH_DELAY_BASE,NULL, "string", NULL, "0s", NULL, - N_("Enable a base delay for " - "fencing actions and specify base delay value."), - N_("This enables a static delay for " - "fencing actions, which can help avoid \"death matches\" where " - "two nodes try to fence each other at the same time. If " - "pcmk_delay_max is also used, a random delay will be " - "added such that the total delay is kept below that value." 
- "This can be set to a single time value to apply to any node " - "targeted by this device (useful if a separate device is " - "configured for each target), or to a node map (for example, " - "\"node1:1s;node2:5\") to set a different value per target.") - }, - { - PCMK_STONITH_ACTION_LIMIT,NULL, "integer", NULL, "1", NULL, - N_("The maximum number of actions can be performed in parallel on this device"), - N_("Cluster property concurrent-fencing=true needs to be configured first." - "Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.") - }, - { - "pcmk_reboot_action", NULL, "string", NULL, - PCMK_ACTION_REBOOT, NULL, - N_("Advanced use only: An alternate command to run instead of 'reboot'"), - N_("Some devices do not support the standard commands or may provide additional ones.\n" - "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.") - }, - { - "pcmk_reboot_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal." - "Use this to specify an alternate, device-specific, timeout for \'reboot\' actions.") - }, - { - "pcmk_reboot_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'reboot\' actions before giving up.") - }, - { - "pcmk_off_action", NULL, "string", NULL, - PCMK_ACTION_OFF, NULL, - N_("Advanced use only: An alternate command to run instead of \'off\'"), - N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'off\' action.") - }, - { - "pcmk_off_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal." - "Use this to specify an alternate, device-specific, timeout for \'off\' actions.") - }, - { - "pcmk_off_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the 'off' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'off\' actions before giving up.") - }, - { - "pcmk_on_action", NULL, "string", NULL, - PCMK_ACTION_ON, NULL, - N_("Advanced use only: An alternate command to run instead of 'on'"), - N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'on\' action.") - }, - { - "pcmk_on_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal." 
- "Use this to specify an alternate, device-specific, timeout for \'on\' actions.") - }, - { - "pcmk_on_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the 'on' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'on\' actions before giving up.") - }, - { - "pcmk_list_action",NULL, "string", NULL, - PCMK_ACTION_LIST, NULL, - N_("Advanced use only: An alternate command to run instead of \'list\'"), - N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'list\' action.") - }, - { - "pcmk_list_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal." - "Use this to specify an alternate, device-specific, timeout for \'list\' actions.") - }, - { - "pcmk_list_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the \'list\' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'list\' actions before giving up.") - }, - { - "pcmk_monitor_action", NULL, "string", NULL, - PCMK_ACTION_MONITOR, NULL, - N_("Advanced use only: An alternate command to run instead of \'monitor\'"), - N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'monitor\' action.") - }, - { - "pcmk_monitor_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal.\n" - "Use this to specify an alternate, device-specific, timeout for \'monitor\' actions.") - }, - { - "pcmk_monitor_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the \'monitor\' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'monitor\' actions before giving up.") - }, - { - "pcmk_status_action", NULL, "string", NULL, - PCMK_ACTION_STATUS, NULL, - N_("Advanced use only: An alternate command to run instead of \'status\'"), - N_("Some devices do not support the standard commands or may provide additional ones." - "Use this to specify an alternate, device-specific, command that implements the \'status\' action.") - }, - { - "pcmk_status_timeout",NULL, "time", NULL, "60s", NULL, - N_("Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout"), - N_("Some devices need much more/less time to complete than normal." 
- "Use this to specify an alternate, device-specific, timeout for \'status\' actions.") - }, - { - "pcmk_status_retries",NULL, "integer", NULL, "2", NULL, - N_("Advanced use only: The maximum number of times to retry the \'status\' command within the timeout period"), - N_("Some devices do not support multiple connections." - " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." - " Use this option to alter the number of times Pacemaker retries \'status\' actions before giving up.") - }, -}; - -void +/* @COMPAT Deprecated since 2.1.8. Use pcmk_list_fence_attrs() or + * crm_resource --list-options=fencing instead of querying daemon metadata. + */ +static int fencer_metadata(void) { + const char *name = "pacemaker-fenced"; const char *desc_short = N_("Instance attributes available for all " - "\"stonith\"-class resources"); - const char *desc_long = N_("Instance attributes available for all \"stonith\"-" - "class resources and used by Pacemaker's fence " - "daemon, formerly known as stonithd"); - - gchar *s = pcmk__format_option_metadata("pacemaker-fenced", desc_short, - desc_long, fencer_options, - PCMK__NELEM(fencer_options)); - printf("%s", s); - g_free(s); + "\"stonith\"-class resources"); + const char *desc_long = N_("Instance attributes available for all " + "\"stonith\"-class resources and used by " + "Pacemaker's fence daemon, formerly known as " + "stonithd"); + + return pcmk__daemon_metadata(out, name, desc_short, desc_long, + pcmk__opt_fencing); } static GOptionEntry entries[] = { @@ -747,8 +524,7 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; - context = pcmk__build_arg_context(args, "text (default), xml", group, - "[metadata]"); + context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, entries); return context; } @@ -757,7 +533,7 @@ int main(int argc, char **argv) { int rc = pcmk_rc_ok; - crm_cluster_t *cluster = NULL; + pcmk_cluster_t *cluster = NULL; crm_ipc_t *old_instance = NULL; GError *error = NULL; @@ -791,7 +567,13 @@ main(int argc, char **argv) if ((g_strv_length(processed_args) >= 2) && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) { - fencer_metadata(); + + rc = fencer_metadata(); + if (rc != pcmk_rc_ok) { + exit_code = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Unable to display metadata: %s", pcmk_rc_str(rc)); + } goto done; } @@ -826,7 +608,7 @@ main(int argc, char **argv) mainloop_add_signal(SIGTERM, stonith_shutdown); - crm_peer_init(); + pcmk__cluster_init_node_caches(); rc = fenced_scheduler_init(); if (rc != pcmk_rc_ok) { @@ -840,16 +622,16 @@ main(int argc, char **argv) if (!stand_alone) { #if SUPPORT_COROSYNC - if (is_corosync_cluster()) { - cluster->destroy = stonith_peer_cs_destroy; - cluster->cpg.cpg_deliver_fn = stonith_peer_ais_callback; - cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { + pcmk_cluster_set_destroy_fn(cluster, stonith_peer_cs_destroy); + pcmk_cpg_set_deliver_fn(cluster, stonith_peer_ais_callback); + pcmk_cpg_set_confchg_fn(cluster, pcmk__cpg_confchg_cb); } #endif // SUPPORT_COROSYNC - crm_set_status_callback(&st_peer_update_callback); + pcmk__cluster_set_status_callback(&st_peer_update_callback); - if (crm_cluster_connect(cluster) == FALSE) { + if (pcmk_cluster_connect(cluster) != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; crm_crit("Cannot 
sign in to the cluster... terminating"); goto done; diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h index 220978a..2d8047c 100644 --- a/daemons/fenced/pacemaker-fenced.h +++ b/daemons/fenced/pacemaker-fenced.h @@ -1,5 +1,5 @@ /* - * Copyright 2009-2023 the Pacemaker project contributors + * Copyright 2009-2024 the Pacemaker project contributors * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. @@ -111,8 +111,8 @@ typedef struct remote_fencing_op_s { /*! * Fencing delay (in seconds) requested by API client (used by controller to - * implement priority-fencing-delay). A value of -1 means disable all - * configured delays. + * implement \c PCMK_OPT_PRIORITY_FENCING_DELAY). A value of -1 means + * disable all configured delays. */ int client_delay; @@ -253,12 +253,9 @@ void void fenced_send_notification(const char *type, const pcmk__action_result_t *result, xmlNode *data); -void fenced_send_device_notification(const char *op, +void fenced_send_config_notification(const char *op, const pcmk__action_result_t *result, const char *desc); -void fenced_send_level_notification(const char *op, - const pcmk__action_result_t *result, - const char *desc); remote_fencing_op_t *initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, @@ -281,7 +278,6 @@ void set_fencing_completed(remote_fencing_op_t * op); int fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg); -void fencer_metadata(void); const char *fenced_device_reboot_action(const char *device_id); bool fenced_device_supports_on(const char *device_id); @@ -327,7 +323,7 @@ extern char *stonith_our_uname; extern gboolean stand_alone; extern GHashTable *device_list; extern GHashTable *topology; -extern long stonith_watchdog_timeout_ms; +extern long long stonith_watchdog_timeout_ms; extern GList *stonith_watchdog_targets; extern GHashTable *stonith_remote_op_list; extern crm_exit_t exit_code; diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am index 78e7c37..7571a6c 100644 --- a/daemons/pacemakerd/Makefile.am +++ b/daemons/pacemakerd/Makefile.am @@ -20,7 +20,8 @@ EXTRA_DIST = pacemakerd.8.inc ## SOURCES -noinst_HEADERS = pacemakerd.h +noinst_HEADERS = pacemakerd.h \ + pcmkd_corosync.h pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c index 365b743..5d5a1db 100644 --- a/daemons/pacemakerd/pacemakerd.c +++ b/daemons/pacemakerd/pacemakerd.c @@ -1,5 +1,5 @@ /* - * Copyright 2010-2023 the Pacemaker project contributors + * Copyright 2010-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
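Editor's note: the pacemaker-fenced main() hunk above moves from writing callback pointers directly into the old crm_cluster_t struct to the new pcmk_cluster_t setter API. A minimal sketch of the new pattern follows; pcmk_cluster_new() does not appear in the hunk and is assumed here, and demo_destroy_cb()/demo_cluster_connect() are illustrative names, while every other call is taken from the hunk itself:

    #include <glib.h>
    #include <crm/cluster.h>

    static void
    demo_destroy_cb(gpointer unused)
    {
        /* The daemon's real callback (stonith_peer_cs_destroy) shuts down */
        crm_crit("Lost connection to the cluster layer");
    }

    static int
    demo_cluster_connect(void)
    {
        pcmk_cluster_t *cluster = pcmk_cluster_new();   /* assumed allocator */

        if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) {
            /* Setters replace direct writes to cluster->destroy and the
             * cluster->cpg.cpg_*_fn fields of the old crm_cluster_t API */
            pcmk_cluster_set_destroy_fn(cluster, demo_destroy_cb);
        }

        /* Now returns a standard Pacemaker return code, not a gboolean */
        return pcmk_cluster_connect(cluster);
    }

Keeping the struct members behind setters means later layout changes do not ripple into the daemons.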
* @@ -10,6 +10,10 @@ #include #include "pacemakerd.h" +#if SUPPORT_COROSYNC +#include "pcmkd_corosync.h" +#endif + #include #include #include @@ -21,8 +25,8 @@ #include #include /* indirectly: CRM_EX_* */ -#include #include +#include #include #include #include @@ -60,19 +64,21 @@ static int pacemakerd_features_xml(pcmk__output_t *out, va_list args) { gchar **feature_list = g_strsplit(CRM_FEATURES, " ", 0); - pcmk__output_xml_create_parent(out, "pacemakerd", - "version", PACEMAKER_VERSION, - "build", BUILD_VERSION, - "feature_set", CRM_FEATURE_SET, + pcmk__output_xml_create_parent(out, PCMK_XE_PACEMAKERD, + PCMK_XA_VERSION, PACEMAKER_VERSION, + PCMK_XA_BUILD, BUILD_VERSION, + PCMK_XA_FEATURE_SET, CRM_FEATURE_SET, NULL); - out->begin_list(out, NULL, NULL, "features"); + out->begin_list(out, NULL, NULL, PCMK_XE_FEATURES); for (char **s = feature_list; *s != NULL; s++) { - pcmk__output_create_xml_text_node(out, "feature", *s); + pcmk__output_create_xml_text_node(out, PCMK_XE_FEATURE, *s); } out->end_list(out); + pcmk__output_xml_pop_parent(out); + g_strfreev(feature_list); return pcmk_rc_ok; } @@ -92,7 +98,7 @@ pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **er static gboolean standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.standby = TRUE; - pcmk__set_env_option(PCMK__ENV_NODE_START_STATE, "standby", false); + pcmk__set_env_option(PCMK__ENV_NODE_START_STATE, PCMK_VALUE_STANDBY, false); return TRUE; } @@ -297,8 +303,6 @@ main(int argc, char **argv) goto done; } - pcmk__force_args(context, &error, "%s --xml-simple-list", g_get_prgname()); - pcmk__register_messages(out, fmt_functions); if (options.features) { @@ -313,7 +317,7 @@ main(int argc, char **argv) } // @COMPAT Drop at 3.0.0; likely last used in 1.1.24 - pcmk__set_env_option(PCMK__ENV_MCP, "true", true); + pcmk__set_env_option(PCMK__ENV_MCP, PCMK_VALUE_TRUE, true); if (options.shutdown) { pcmk__cli_init_logging("pacemakerd", args->verbosity); @@ -402,7 +406,7 @@ main(int argc, char **argv) { const char *facility = pcmk__env_option(PCMK__ENV_LOGFACILITY); - if (!pcmk__str_eq(facility, PCMK__VALUE_NONE, + if (!pcmk__str_eq(facility, PCMK_VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__set_env_option("LOGFACILITY", facility, true); } @@ -444,7 +448,7 @@ main(int argc, char **argv) if ((running_with_sbd) && pcmk__get_sbd_sync_resource_startup()) { crm_notice("Waiting for startup-trigger from SBD."); - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_WAITPING; + pacemakerd_state = PCMK__VALUE_WAIT_FOR_PING; startup_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, init_children_processes, NULL); } else { if (running_with_sbd) { @@ -452,7 +456,7 @@ main(int argc, char **argv) "by your SBD version) improve reliability of " "interworking between SBD & pacemaker."); } - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS; + pacemakerd_state = PCMK__VALUE_STARTING_DAEMONS; init_children_processes(NULL); } diff --git a/daemons/pacemakerd/pacemakerd.h b/daemons/pacemakerd/pacemakerd.h index ee6facf..51e32b1 100644 --- a/daemons/pacemakerd/pacemakerd.h +++ b/daemons/pacemakerd/pacemakerd.h @@ -9,27 +9,19 @@ #include -#include - #define MAX_RESPAWN 100 extern GMainLoop *mainloop; extern struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks; extern const char *pacemakerd_state; extern gboolean running_with_sbd; -extern unsigned int shutdown_complete_state_reported_to; extern gboolean shutdown_complete_state_reported_client_closed; +extern unsigned 
int shutdown_complete_state_reported_to; extern crm_trigger_t *shutdown_trigger; extern crm_trigger_t *startup_trigger; extern time_t subdaemon_check_progress; -gboolean pacemakerd_read_config(void); - -gboolean cluster_connect_cfg(void); -void cluster_disconnect_cfg(void); int find_and_track_existing_processes(void); gboolean init_children_processes(void *user_data); -void restart_cluster_subdaemons(void); void pcmk_shutdown(int nsig); -void pcmkd_shutdown_corosync(void); -bool pcmkd_corosync_connected(void); +void restart_cluster_subdaemons(void); diff --git a/daemons/pacemakerd/pcmkd_corosync.c b/daemons/pacemakerd/pcmkd_corosync.c index 8a1a867..43f6231 100644 --- a/daemons/pacemakerd/pcmkd_corosync.c +++ b/daemons/pacemakerd/pcmkd_corosync.c @@ -9,6 +9,7 @@ #include #include "pacemakerd.h" +#include "pcmkd_corosync.h" #include #include /* for calls to stat() */ @@ -271,7 +272,8 @@ pacemakerd_read_config(void) gid_t found_gid = 0; pid_t found_pid = 0; int rv; - enum cluster_type_e stack; + enum pcmk_cluster_layer cluster_layer = pcmk_cluster_layer_unknown; + const char *cluster_layer_s = NULL; // There can be only one possibility do { @@ -318,19 +320,21 @@ pacemakerd_read_config(void) return FALSE; } - stack = get_cluster_type(); - if (stack != pcmk_cluster_corosync) { + cluster_layer = pcmk_get_cluster_layer(); + cluster_layer_s = pcmk_cluster_layer_text(cluster_layer); + + if (cluster_layer != pcmk_cluster_layer_corosync) { crm_crit("Expected Corosync cluster layer but detected %s " - CRM_XS " stack=%d", name_for_cluster_type(stack), stack); + CRM_XS " cluster_layer=%d", + cluster_layer_s, cluster_layer); return FALSE; } - crm_info("Reading configuration for %s stack", - name_for_cluster_type(stack)); - pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, "corosync", true); + crm_info("Reading configuration for %s cluster layer", cluster_layer_s); + pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, PCMK_VALUE_COROSYNC, true); // @COMPAT Drop at 3.0.0; added unused in 1.1.9 - pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, "corosync", true); + pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, PCMK_VALUE_COROSYNC, true); // If debug logging is not configured, check whether corosync has it if (pcmk__env_option(PCMK__ENV_DEBUG) == NULL) { diff --git a/daemons/pacemakerd/pcmkd_corosync.h b/daemons/pacemakerd/pcmkd_corosync.h new file mode 100644 index 0000000..8c4a1e1 --- /dev/null +++ b/daemons/pacemakerd/pcmkd_corosync.h @@ -0,0 +1,16 @@ +/* + * Copyright 2010-2023 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. + */ + +#include + +gboolean cluster_connect_cfg(void); +void cluster_disconnect_cfg(void); +gboolean pacemakerd_read_config(void); +bool pcmkd_corosync_connected(void); +void pcmkd_shutdown_corosync(void); diff --git a/daemons/pacemakerd/pcmkd_messages.c b/daemons/pacemakerd/pcmkd_messages.c index 4e6f822..9837d5a 100644 --- a/daemons/pacemakerd/pcmkd_messages.c +++ b/daemons/pacemakerd/pcmkd_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2010-2022 the Pacemaker project contributors + * Copyright 2010-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
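Editor's note: the pacemakerd_read_config() hunk above replaces the get_cluster_type()/name_for_cluster_type() pair with the pcmk_cluster_layer enum and its text helper. A sketch of the new detection idiom, assuming only the two functions shown in the hunk (require_corosync() is an illustrative name):

    #include <glib.h>
    #include <crm/cluster.h>

    static gboolean
    require_corosync(void)
    {
        enum pcmk_cluster_layer layer = pcmk_get_cluster_layer();
        const char *layer_s = pcmk_cluster_layer_text(layer);

        if (layer != pcmk_cluster_layer_corosync) {
            /* The hunk also logs the numeric value via CRM_XS */
            crm_crit("Expected Corosync cluster layer but detected %s",
                     layer_s);
            return FALSE;
        }
        crm_info("Reading configuration for %s cluster layer", layer_s);
        return TRUE;
    }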
* @@ -11,7 +11,7 @@ #include "pacemakerd.h" #include -#include +#include #include #include @@ -30,7 +30,7 @@ handle_node_cache_request(pcmk__request_t *request) pcmk__client_name(request->ipc_client)); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_OK); + PCMK__XE_ACK, NULL, CRM_EX_OK); return NULL; } @@ -42,23 +42,24 @@ handle_ping_request(pcmk__request_t *request) const char *value = NULL; xmlNode *ping = NULL; xmlNode *reply = NULL; - const char *from = crm_element_value(msg, F_CRM_SYS_FROM); + const char *from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); /* Pinged for status */ - crm_trace("Pinged from " F_CRM_SYS_FROM "='%s' " F_CRM_ORIGIN "='%s'", + crm_trace("Pinged from " PCMK__XA_CRM_SYS_FROM "='%s' " + PCMK_XA_ORIGIN "='%s'", pcmk__s(from, ""), - pcmk__s(crm_element_value(msg, F_CRM_ORIGIN), "")); + pcmk__s(crm_element_value(msg, PCMK_XA_ORIGIN), "")); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INDETERMINATE); + PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); - ping = create_xml_node(NULL, XML_CRM_TAG_PING); - value = crm_element_value(msg, F_CRM_SYS_TO); - crm_xml_add(ping, XML_PING_ATTR_SYSFROM, value); - crm_xml_add(ping, XML_PING_ATTR_PACEMAKERDSTATE, pacemakerd_state); - crm_xml_add_ll(ping, XML_ATTR_TSTAMP, + ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); + value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); + crm_xml_add(ping, PCMK__XA_CRM_SUBSYSTEM, value); + crm_xml_add(ping, PCMK__XA_PACEMAKERD_STATE, pacemakerd_state); + crm_xml_add_ll(ping, PCMK_XA_CRM_TIMESTAMP, (long long) subdaemon_check_progress); - crm_xml_add(ping, XML_PING_ATTR_STATUS, "ok"); + crm_xml_add(ping, PCMK_XA_RESULT, "ok"); reply = create_reply(msg, ping); free_xml(ping); @@ -73,16 +74,18 @@ handle_ping_request(pcmk__request_t *request) /* just proceed state on sbd pinging us */ if (from && strstr(from, "sbd")) { - if (pcmk__str_eq(pacemakerd_state, XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE, pcmk__str_none)) { + if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_SHUTDOWN_COMPLETE, + pcmk__str_none)) { if (pcmk__get_sbd_sync_resource_startup()) { crm_notice("Shutdown-complete-state passed to SBD."); } shutdown_complete_state_reported_to = request->ipc_client->pid; - } else if (pcmk__str_eq(pacemakerd_state, XML_PING_ATTR_PACEMAKERDSTATE_WAITPING, pcmk__str_none)) { + } else if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_WAIT_FOR_PING, + pcmk__str_none)) { crm_notice("Received startup-trigger from SBD."); - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS; + pacemakerd_state = PCMK__VALUE_STARTING_DAEMONS; mainloop_set_trigger(startup_trigger); } } @@ -105,19 +108,19 @@ handle_shutdown_request(pcmk__request_t *request) bool allowed = pcmk_is_set(request->ipc_client->flags, pcmk__client_privileged); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INDETERMINATE); + PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); - shutdown = create_xml_node(NULL, XML_CIB_ATTR_SHUTDOWN); + shutdown = pcmk__xe_create(NULL, PCMK__XE_SHUTDOWN); if (allowed) { crm_notice("Shutting down in response to IPC request %s from %s", - crm_element_value(msg, F_CRM_REFERENCE), - crm_element_value(msg, F_CRM_ORIGIN)); - crm_xml_add_int(shutdown, XML_LRM_ATTR_OPSTATUS, CRM_EX_OK); + crm_element_value(msg, PCMK_XA_REFERENCE), + crm_element_value(msg, PCMK_XA_ORIGIN)); + crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_OK); } else { crm_warn("Ignoring shutdown 
request from unprivileged client %s", pcmk__client_name(request->ipc_client)); - crm_xml_add_int(shutdown, XML_LRM_ATTR_OPSTATUS, CRM_EX_INSUFFICIENT_PRIV); + crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_INSUFFICIENT_PRIV); } reply = create_reply(msg, shutdown); @@ -142,7 +145,7 @@ static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INVALID_PARAM); + PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", @@ -168,7 +171,7 @@ pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -217,7 +220,7 @@ pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) msg = pcmk__client_data2xml(c, data, &id, &flags); if (msg == NULL) { - pcmk__ipc_send_ack(c, id, flags, "ack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } else { @@ -235,7 +238,7 @@ pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, F_CRM_TASK); + request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, pcmkd_handlers); diff --git a/daemons/pacemakerd/pcmkd_subdaemons.c b/daemons/pacemakerd/pcmkd_subdaemons.c index 21e432e..5bd3512 100644 --- a/daemons/pacemakerd/pcmkd_subdaemons.c +++ b/daemons/pacemakerd/pcmkd_subdaemons.c @@ -1,5 +1,5 @@ /* - * Copyright 2010-2023 the Pacemaker project contributors + * Copyright 2010-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
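Editor's note: the handle_ping_request() rework above builds its status reply from named XML constants instead of string literals. A condensed sketch of that reply-construction pattern; build_ping_reply(), msg, and state are illustrative stand-ins for the request XML and pacemakerd_state, and the XML helpers and attribute names are the ones used in the hunk:

    #include <time.h>
    #include <libxml/tree.h>

    static xmlNode *
    build_ping_reply(xmlNode *msg, const char *state)
    {
        xmlNode *ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE);
        xmlNode *reply = NULL;

        crm_xml_add(ping, PCMK__XA_PACEMAKERD_STATE, state);
        crm_xml_add_ll(ping, PCMK_XA_CRM_TIMESTAMP, (long long) time(NULL));
        crm_xml_add(ping, PCMK_XA_RESULT, "ok");

        reply = create_reply(msg, ping);    /* daemon-internal reply wrapper */
        free_xml(ping);                     /* payload is copied into reply */
        return reply;
    }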
* @@ -10,6 +10,10 @@ #include #include "pacemakerd.h" +#if SUPPORT_COROSYNC +#include "pcmkd_corosync.h" +#endif + #include #include #include @@ -21,22 +25,25 @@ #include #include -#include +#include + +enum child_daemon_flags { + child_none = 0, + child_respawn = 1 << 0, + child_needs_cluster = 1 << 1, + child_needs_retry = 1 << 2, + child_active_before_startup = 1 << 3, +}; typedef struct pcmk_child_s { pid_t pid; int respawn_count; - bool respawn; const char *name; const char *uid; const char *command; const char *endpoint; /* IPC server name */ - bool needs_cluster; int check_count; - - /* Anything below here will be dynamically initialized */ - bool needs_retry; - bool active_before_startup; + uint32_t flags; } pcmk_child_t; #define PCMK_PROCESS_CHECK_INTERVAL 1 @@ -48,34 +55,34 @@ typedef struct pcmk_child_s { static pcmk_child_t pcmk_children[] = { { - 0, 0, true, "pacemaker-based", CRM_DAEMON_USER, + 0, 0, "pacemaker-based", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-based", PCMK__SERVER_BASED_RO, - true + 0, child_respawn | child_needs_cluster }, { - 0, 0, true, "pacemaker-fenced", NULL, + 0, 0, "pacemaker-fenced", NULL, CRM_DAEMON_DIR "/pacemaker-fenced", "stonith-ng", - true + 0, child_respawn | child_needs_cluster }, { - 0, 0, true, "pacemaker-execd", NULL, + 0, 0, "pacemaker-execd", NULL, CRM_DAEMON_DIR "/pacemaker-execd", CRM_SYSTEM_LRMD, - false + 0, child_respawn }, { - 0, 0, true, "pacemaker-attrd", CRM_DAEMON_USER, - CRM_DAEMON_DIR "/pacemaker-attrd", T_ATTRD, - true + 0, 0, "pacemaker-attrd", CRM_DAEMON_USER, + CRM_DAEMON_DIR "/pacemaker-attrd", PCMK__VALUE_ATTRD, + 0, child_respawn | child_needs_cluster }, { - 0, 0, true, "pacemaker-schedulerd", CRM_DAEMON_USER, + 0, 0, "pacemaker-schedulerd", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-schedulerd", CRM_SYSTEM_PENGINE, - false + 0, child_respawn }, { - 0, 0, true, "pacemaker-controld", CRM_DAEMON_USER, + 0, 0, "pacemaker-controld", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-controld", CRM_SYSTEM_CRMD, - true + 0, child_respawn | child_needs_cluster }, }; @@ -103,7 +110,7 @@ unsigned int shutdown_complete_state_reported_to = 0; gboolean shutdown_complete_state_reported_client_closed = FALSE; /* state we report when asked via pacemakerd-api status-ping */ -const char *pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_INIT; +const char *pacemakerd_state = PCMK__VALUE_INIT; gboolean running_with_sbd = FALSE; /* local copy */ GMainLoop *mainloop = NULL; @@ -154,7 +161,7 @@ check_next_subdaemon(gpointer user_data) pcmk_children[next_child].pid), pcmk_children[next_child].check_count); stop_child(&pcmk_children[next_child], SIGKILL); - if (pcmk_children[next_child].respawn) { + if (pcmk_is_set(pcmk_children[next_child].flags, child_respawn)) { /* as long as the respawn-limit isn't reached give it another round of check retries */ @@ -166,7 +173,7 @@ check_next_subdaemon(gpointer user_data) (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[next_child].pid), pcmk_children[next_child].check_count); - if (pcmk_children[next_child].respawn) { + if (pcmk_is_set(pcmk_children[next_child].flags, child_respawn)) { /* as long as the respawn-limit isn't reached and we haven't run out of connect retries we account this as progress we are willing @@ -180,7 +187,7 @@ check_next_subdaemon(gpointer user_data) */ break; case pcmk_rc_ipc_unresponsive: - if (!pcmk_children[next_child].respawn) { + if (!pcmk_is_set(pcmk_children[next_child].flags, child_respawn)) { /* if a subdaemon is down and we don't want it to be restarted this is a success 
during shutdown. if it isn't restarted anymore @@ -191,7 +198,7 @@ check_next_subdaemon(gpointer user_data) subdaemon_check_progress = time(NULL); } } - if (!pcmk_children[next_child].active_before_startup) { + if (!pcmk_is_set(pcmk_children[next_child].flags, child_active_before_startup)) { crm_trace("found %s[%lld] missing - signal-handler " "will take care of it", pcmk_children[next_child].name, @@ -199,7 +206,7 @@ check_next_subdaemon(gpointer user_data) pcmk_children[next_child].pid)); break; } - if (pcmk_children[next_child].respawn) { + if (pcmk_is_set(pcmk_children[next_child].flags, child_respawn)) { crm_err("%s[%lld] terminated", pcmk_children[next_child].name, (long long) PCMK__SPECIAL_PID_AS_0( @@ -264,14 +271,14 @@ pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitco case CRM_EX_FATAL: crm_warn("Shutting cluster down because %s[%d] had fatal failure", name, pid); - child->respawn = false; + child->flags &= ~child_respawn; fatal_error = TRUE; pcmk_shutdown(SIGTERM); break; case CRM_EX_PANIC: crm_emerg("%s[%d] instructed the machine to reset", name, pid); - child->respawn = false; + child->flags &= ~child_respawn; fatal_error = TRUE; pcmk__panic(__func__); pcmk_shutdown(SIGTERM); @@ -291,20 +298,20 @@ static void pcmk_process_exit(pcmk_child_t * child) { child->pid = 0; - child->active_before_startup = false; + child->flags &= ~child_active_before_startup; child->check_count = 0; child->respawn_count += 1; if (child->respawn_count > MAX_RESPAWN) { crm_err("Child respawn count exceeded by %s", child->name); - child->respawn = false; + child->flags &= ~child_respawn; } if (shutdown_trigger) { /* resume step-wise shutdown (returned TRUE yields no parallelizing) */ mainloop_set_trigger(shutdown_trigger); - } else if (!child->respawn) { + } else if (!pcmk_is_set(child->flags, child_respawn)) { /* nothing to do */ } else if (crm_is_true(pcmk__env_option(PCMK__ENV_FAIL_FAST))) { @@ -316,10 +323,10 @@ pcmk_process_exit(pcmk_child_t * child) " appears alright per %s IPC end-point", child->name, child->endpoint); - } else if (child->needs_cluster && !pcmkd_cluster_connected()) { + } else if (pcmk_is_set(child->flags, child_needs_cluster) && !pcmkd_cluster_connected()) { crm_notice("Not respawning %s subdaemon until cluster returns", child->name); - child->needs_retry = true; + child->flags |= child_needs_retry; } else { crm_notice("Respawning %s subdaemon after unexpected exit", @@ -336,7 +343,7 @@ pcmk_shutdown_worker(gpointer user_data) if (phase == PCMK__NELEM(pcmk_children) - 1) { crm_notice("Shutting down Pacemaker"); - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN; + pacemakerd_state = PCMK__VALUE_SHUTTING_DOWN; } for (; phase >= 0; phase--) { @@ -345,7 +352,7 @@ pcmk_shutdown_worker(gpointer user_data) if (child->pid != 0) { time_t now = time(NULL); - if (child->respawn) { + if (pcmk_is_set(child->flags, child_respawn)) { if (child->pid == PCMK__SPECIAL_PID) { crm_warn("The process behind %s IPC cannot be" " terminated, so either wait the graceful" @@ -359,7 +366,7 @@ pcmk_shutdown_worker(gpointer user_data) child->command); } next_log = now + 30; - child->respawn = false; + child->flags &= ~child_respawn; stop_child(child, SIGTERM); if (phase < PCMK_CHILD_CONTROLD) { g_timeout_add(SHUTDOWN_ESCALATION_PERIOD, @@ -381,7 +388,7 @@ pcmk_shutdown_worker(gpointer user_data) } crm_notice("Shutdown complete"); - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE; + pacemakerd_state = PCMK__VALUE_SHUTDOWN_COMPLETE; if (!fatal_error 
&& running_with_sbd && pcmk__get_sbd_sync_resource_startup() && !shutdown_complete_state_reported_client_closed) { @@ -393,8 +400,12 @@ pcmk_shutdown_worker(gpointer user_data) { const char *delay = pcmk__env_option(PCMK__ENV_SHUTDOWN_DELAY); if(delay) { + long long delay_ms = crm_get_msec(delay); + sync(); - pcmk__sleep_ms(crm_get_msec(delay)); + if (delay_ms > 0) { + pcmk__sleep_ms((unsigned int) QB_MIN(delay_ms, UINT_MAX)); + } } } @@ -427,7 +438,7 @@ start_child(pcmk_child_t * child) const char *env_valgrind = pcmk__env_option(PCMK__ENV_VALGRIND_ENABLED); const char *env_callgrind = pcmk__env_option(PCMK__ENV_CALLGRIND_ENABLED); - child->active_before_startup = false; + child->flags &= ~child_active_before_startup; child->check_count = 0; if (child->command == NULL) { @@ -481,19 +492,20 @@ start_child(pcmk_child_t * child) (void)setsid(); /* Setup the two alternate arg arrays */ - opts_vgrind[0] = strdup(VALGRIND_BIN); + opts_vgrind[0] = pcmk__str_copy(VALGRIND_BIN); if (use_callgrind) { - opts_vgrind[1] = strdup("--tool=callgrind"); - opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p"); - opts_vgrind[3] = strdup(child->command); + opts_vgrind[1] = pcmk__str_copy("--tool=callgrind"); + opts_vgrind[2] = pcmk__str_copy("--callgrind-out-file=" + CRM_STATE_DIR "/callgrind.out.%p"); + opts_vgrind[3] = pcmk__str_copy(child->command); opts_vgrind[4] = NULL; } else { - opts_vgrind[1] = strdup(child->command); + opts_vgrind[1] = pcmk__str_copy(child->command); opts_vgrind[2] = NULL; opts_vgrind[3] = NULL; opts_vgrind[4] = NULL; } - opts_default[0] = strdup(child->command); + opts_default[0] = pcmk__str_copy(child->command); if(gid) { // Drop root group access if not needed @@ -759,7 +771,7 @@ find_and_track_existing_processes(void) (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid)); pcmk_children[i].respawn_count = -1; /* 0~keep watching */ - pcmk_children[i].active_before_startup = true; + pcmk_children[i].flags |= child_active_before_startup; break; case pcmk_rc_ipc_pid_only: if (pcmk_children[i].respawn_count == WAIT_TRIES) { @@ -802,7 +814,7 @@ find_and_track_existing_processes(void) gboolean init_children_processes(void *user_data) { - if (is_corosync_cluster()) { + if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { /* Corosync clusters can drop root group access, because we set * uidgid.gid.${gid}=1 via CMAP, which allows these processes to connect * to corosync. 
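Editor's note: the pcmkd_subdaemons.c changes above collapse three per-child booleans (respawn, needs_cluster, needs_retry, plus active_before_startup) into a single flags word tested with pcmk_is_set(). A short sketch of the resulting set/clear/test idiom; demo_child_flags() is hypothetical, while pcmk_child_t and the flag names come from the hunk:

    static void
    demo_child_flags(pcmk_child_t *child)
    {
        child->flags |= child_needs_retry;   /* set: retry when cluster returns */
        child->flags &= ~child_respawn;      /* clear: stop respawning */

        if (pcmk_is_set(child->flags, child_needs_cluster)
            && !pcmk_is_set(child->flags, child_respawn)) {
            /* combined tests read the same way pcmk_process_exit() uses them */
        }
    }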
@@ -825,8 +837,8 @@ init_children_processes(void *user_data) * * This may be useful for the daemons to know */ - pcmk__set_env_option(PCMK__ENV_RESPAWNED, "true", false); - pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_RUNNING; + pcmk__set_env_option(PCMK__ENV_RESPAWNED, PCMK_VALUE_TRUE, false); + pacemakerd_state = PCMK__VALUE_RUNNING; return TRUE; } @@ -843,13 +855,13 @@ void restart_cluster_subdaemons(void) { for (int i = 0; i < PCMK__NELEM(pcmk_children); i++) { - if (!pcmk_children[i].needs_retry || pcmk_children[i].pid != 0) { + if (!pcmk_is_set(pcmk_children[i].flags, child_needs_retry) || pcmk_children[i].pid != 0) { continue; } crm_notice("Respawning cluster-based subdaemon: %s", pcmk_children[i].name); if (start_child(&pcmk_children[i])) { - pcmk_children[i].needs_retry = false; + pcmk_children[i].flags &= ~child_needs_retry; } } } diff --git a/daemons/schedulerd/pacemaker-schedulerd.c b/daemons/schedulerd/pacemaker-schedulerd.c index 3f2a3e8..d1b9362 100644 --- a/daemons/schedulerd/pacemaker-schedulerd.c +++ b/daemons/schedulerd/pacemaker-schedulerd.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -31,8 +31,8 @@ struct { } options; pcmk__output_t *logger_out = NULL; -pcmk__output_t *out = NULL; +static pcmk__output_t *out = NULL; static GMainLoop *mainloop = NULL; static qb_ipcs_service_t *ipcs = NULL; static crm_exit_t exit_code = CRM_EX_OK; @@ -46,6 +46,19 @@ pcmk__supported_format_t formats[] = { void pengine_shutdown(int nsig); +/* @COMPAT Deprecated since 2.1.8. Use pcmk_list_cluster_options() or + * crm_attribute --list-options=cluster instead of querying daemon metadata. + */ +static int +scheduler_metadata(pcmk__output_t *out) +{ + return pcmk__daemon_metadata(out, "pacemaker-schedulerd", + "Pacemaker scheduler options", + "Cluster options used by Pacemaker's " + "scheduler", + pcmk__opt_schedulerd); +} + static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; @@ -58,8 +71,7 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { { NULL } }; - context = pcmk__build_arg_context(args, "text (default), xml", group, - "[metadata]"); + context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, extra_prog_entries); return context; } @@ -98,14 +110,20 @@ main(int argc, char **argv) if (options.remainder) { if (g_strv_length(options.remainder) == 1 && pcmk__str_eq("metadata", options.remainder[0], pcmk__str_casei)) { - pe_metadata(out); - goto done; + + rc = scheduler_metadata(out); + if (rc != pcmk_rc_ok) { + exit_code = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Unable to display metadata: %s", pcmk_rc_str(rc)); + } + } else { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Unsupported extra command line parameters"); - goto done; } + goto done; } if (args->version) { diff --git a/daemons/schedulerd/pacemaker-schedulerd.h b/daemons/schedulerd/pacemaker-schedulerd.h index 75b7d38..a7c56e1 100644 --- a/daemons/schedulerd/pacemaker-schedulerd.h +++ b/daemons/schedulerd/pacemaker-schedulerd.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
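Editor's note: the schedulerd_messages.c hunk below swaps pe_pref() for pcmk__cluster_option() when reading the pe-*-series-max limits. A sketch of that lookup and its fallback behavior; series_wrap_limit() is an illustrative wrapper around the two calls shown in the hunk:

    #include <glib.h>

    static int
    series_wrap_limit(GHashTable *config, const char *param, int dflt)
    {
        int wrap = dflt;
        const char *value = pcmk__cluster_option(config, param);

        /* Fall back to the built-in default (e.g. 4000 for pe-input) when
         * the option is unset or does not parse as an integer >= -1 */
        if ((value == NULL)
            || (pcmk__scan_min_int(value, &wrap, -1) != pcmk_rc_ok)) {
            wrap = dflt;
        }
        return wrap;
    }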
* @@ -14,7 +14,6 @@ #include extern pcmk__output_t *logger_out; -extern pcmk__output_t *out; extern struct qb_ipcs_service_handlers ipc_callbacks; #endif diff --git a/daemons/schedulerd/schedulerd_messages.c b/daemons/schedulerd/schedulerd_messages.c index 5a97365..5dcec39 100644 --- a/daemons/schedulerd/schedulerd_messages.c +++ b/daemons/schedulerd/schedulerd_messages.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2023 the Pacemaker project contributors + * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -27,7 +27,7 @@ init_working_set(void) { pcmk_scheduler_t *scheduler = pe_new_working_set(); - CRM_ASSERT(scheduler != NULL); + pcmk__mem_assert(scheduler); crm_config_error = FALSE; crm_config_warning = FALSE; @@ -51,13 +51,14 @@ handle_pecalc_request(pcmk__request_t *request) */ int wrap; } series[] = { - { "pe-error", "pe-error-series-max", -1 }, - { "pe-warn", "pe-warn-series-max", 5000 }, - { "pe-input", "pe-input-series-max", 4000 }, + { "pe-error", PCMK_OPT_PE_ERROR_SERIES_MAX, -1 }, + { "pe-warn", PCMK_OPT_PE_WARN_SERIES_MAX, 5000 }, + { "pe-input", PCMK_OPT_PE_INPUT_SERIES_MAX, 4000 }, }; xmlNode *msg = request->xml; - xmlNode *xml_data = get_message_xml(msg, F_CRM_DATA); + xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); + xmlNode *xml_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); static char *last_digest = NULL; static char *filename = NULL; @@ -75,15 +76,15 @@ handle_pecalc_request(pcmk__request_t *request) pcmk_scheduler_t *scheduler = init_working_set(); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INDETERMINATE); + PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); digest = calculate_xml_versioned_digest(xml_data, FALSE, FALSE, CRM_FEATURE_SET); - converted = copy_xml(xml_data); - if (!cli_config_update(&converted, NULL, TRUE)) { - scheduler->graph = create_xml_node(NULL, XML_TAG_GRAPH); + converted = pcmk__xml_copy(NULL, xml_data); + if (pcmk_update_configured_schema(&converted, true) != pcmk_rc_ok) { + scheduler->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH); crm_xml_add_int(scheduler->graph, "transition_id", 0); - crm_xml_add_int(scheduler->graph, "cluster-delay", 0); + crm_xml_add_int(scheduler->graph, PCMK_OPT_CLUSTER_DELAY, 0); process = false; free(digest); @@ -104,15 +105,16 @@ handle_pecalc_request(pcmk__request_t *request) } // Get appropriate index into series[] array - if (was_processing_error) { + if (was_processing_error || crm_config_error) { series_id = 0; - } else if (was_processing_warning) { + } else if (was_processing_warning || crm_config_warning) { series_id = 1; } else { series_id = 2; } - value = pe_pref(scheduler->config_hash, series[series_id].param); + value = pcmk__cluster_option(scheduler->config_hash, + series[series_id].param); if ((value == NULL) || (pcmk__scan_min_int(value, &series_wrap, -1) != pcmk_rc_ok)) { series_wrap = series[series_id].wrap; @@ -146,7 +148,7 @@ handle_pecalc_request(pcmk__request_t *request) series[series_id].name, seq, true); } - crm_xml_add(reply, F_CRM_TGRAPH_INPUT, filename); + crm_xml_add(reply, PCMK__XA_CRM_TGRAPH_IN, filename); crm_xml_add_int(reply, PCMK__XA_GRAPH_ERRORS, was_processing_error); crm_xml_add_int(reply, PCMK__XA_GRAPH_WARNINGS, was_processing_warning); crm_xml_add_int(reply, PCMK__XA_CONFIG_ERRORS, crm_config_error); @@ -162,8 +164,9 @@ 
handle_pecalc_request(pcmk__request_t *request) } else { unlink(filename); - crm_xml_add_ll(xml_data, "execution-date", (long long) execution_date); - write_xml_file(xml_data, filename, TRUE); + crm_xml_add_ll(xml_data, PCMK_XA_EXECUTION_DATE, + (long long) execution_date); + pcmk__xml_write_file(xml_data, filename, true, NULL); pcmk__write_series_sequence(PE_STATE_DIR, series[series_id].name, ++seq, series_wrap); } @@ -181,7 +184,7 @@ static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INVALID_PARAM); + PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", @@ -193,7 +196,7 @@ static xmlNode * handle_hello_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, - "ack", NULL, CRM_EX_INDETERMINATE); + PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_trace("Received IPC hello from %s", pcmk__client_name(request->ipc_client)); @@ -218,7 +221,7 @@ pe_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { - return -EIO; + return -ENOMEM; } return 0; } @@ -240,19 +243,21 @@ pe_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) msg = pcmk__client_data2xml(c, data, &id, &flags); if (msg == NULL) { - pcmk__ipc_send_ack(c, id, flags, "ack", NULL, CRM_EX_PROTOCOL); + pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } - sys_to = crm_element_value(msg, F_CRM_SYS_TO); + sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); - if (pcmk__str_eq(crm_element_value(msg, F_CRM_MSG_TYPE), - XML_ATTR_RESPONSE, pcmk__str_none)) { - pcmk__ipc_send_ack(c, id, flags, "ack", NULL, CRM_EX_INDETERMINATE); + if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT), + PCMK__VALUE_RESPONSE, pcmk__str_none)) { + pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, + CRM_EX_INDETERMINATE); crm_info("Ignoring IPC reply from %s", pcmk__client_name(c)); } else if (!pcmk__str_eq(sys_to, CRM_SYSTEM_PENGINE, pcmk__str_none)) { - pcmk__ipc_send_ack(c, id, flags, "ack", NULL, CRM_EX_INDETERMINATE); + pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, + CRM_EX_INDETERMINATE); crm_info("Ignoring invalid IPC message: to '%s' not " CRM_SYSTEM_PENGINE, pcmk__s(sys_to, "")); @@ -271,7 +276,7 @@ pe_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, F_CRM_TASK); + request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, schedulerd_handlers); diff --git a/devel/Makefile.am b/devel/Makefile.am index b50f097..15012f6 100644 --- a/devel/Makefile.am +++ b/devel/Makefile.am @@ -165,6 +165,8 @@ coverity-clean: ## cppcheck GLIB_CFLAGS ?= $(pkg-config --cflags glib-2.0) +GLIB_INCL_DEF_CFLAGS = $(shell echo $(GLIB_CFLAGS) \ + | tr ' ' '\n' | grep '^-[IDU]' | paste -d ' ') # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} @@ -181,7 +183,7 @@ cppcheck: --output-file=$(CPPCHECK_OUT) \ --max-configs=30 --inline-suppr -q \ --library=posix --library=gnu --library=gtk \ - $(GLIB_CFLAGS) -D__GNUC__ \ + $(GLIB_INCL_DEF_CFLAGS) -D__GNUC__ \ $(foreach dir,$(CPPCHECK_DIRS),$(top_srcdir)/$(dir)) @echo 
"Done: See $(CPPCHECK_OUT)" @echo "When no longer needed, make cppcheck-clean" diff --git a/doc/README.md b/doc/README.md index c406fad..07f546e 100644 --- a/doc/README.md +++ b/doc/README.md @@ -4,13 +4,12 @@ Pacemaker has multiple forms of documentation: * The primary end-user documentation is a series of "books": - * Clusters From Scratch: Simplified walk-through of setting up a - cluster for the first time - * Pacemaker Administration: Tips for managing a cluster - * Pacemaker Development: How to work on the Pacemaker code base - * Pacemaker Explained: Configuration reference guide - * Pacemaker Remote: Configuration and walk-throughs for extended - clusters + * *Clusters From Scratch*: Simplified walk-through of setting up a cluster + for the first time + * *Pacemaker Administration*: Tips for managing a cluster + * *Pacemaker Development*: How to work on the Pacemaker code base + * *Pacemaker Explained*: Configuration reference guide + * *Pacemaker Remote*: Configuration and walk-throughs for extended clusters The source for these is kept in this directory's sphinx subdirectory. Generated versions are available online in epub, PDF, and HTML format at: @@ -51,20 +50,7 @@ Pacemaker has multiple forms of documentation: but do still have some useful information. The plan is to incorporate an updated version of them into the books. -## Editing the Books +## Editing the Documentation -The sphinx subdirectory has a subdirectory for each book by title. Each book's -directory contains .rst files, which are the chapter sources in -reStructuredText format (with index.rst as the starting point). - -Once you have edited the sources as desired, run "make" here or in the sphinx -subdirectory to generate all the books locally. You can view the results by -pointing your web browser to (replacing PATH\_TO\_CHECKOUT and BOOK\_TITLE -appropriately): - - file:///PATH_TO_CHECKOUT/doc/sphinx/BOOK_TITLE/_build/html/index.html - -See the comments at the top of doc/sphinx/Makefile.am for various options you -can use. For a guide to sphinx-flavored reStructuredText, see: - - https://www.sphinx-doc.org/en/master/usage/restructuredtext/ +If you wish to contribute documentation changes, please see the "Documentation" +chapter of *Pacemaker Development*. diff --git a/doc/sphinx/Makefile.am b/doc/sphinx/Makefile.am index e48e19a..b95f47b 100644 --- a/doc/sphinx/Makefile.am +++ b/doc/sphinx/Makefile.am @@ -1,5 +1,5 @@ # -# Copyright 2003-2023 the Pacemaker project contributors +# Copyright 2003-2024 the Pacemaker project contributors # # The version control history for this file may have further details. # @@ -84,7 +84,7 @@ EXTRA_DIST = $(wildcard */*.rst) $(DOTS) $(SVGS) \ # don't cross filesystems, sparse, show progress RSYNC_OPTS = -rlptvzxS --progress -PACKAGE_SERIES=$(shell echo "$VERSION" | awk -F. '{ print $1"."$2 }') +PACKAGE_SERIES=$(shell echo "$(VERSION)" | awk -F. '{ print $$1"."$$2 }') BOOK_RSYNC_DEST = $(RSYNC_DEST)/$(PACKAGE)/doc/$(PACKAGE_SERIES) @@ -120,20 +120,22 @@ INKSCAPE_CMD = $(INKSCAPE) --export-dpi=90 -C # Create the book directory in case this is a VPATH build. 
$(BOOKS:%=%/conf.py): conf.py.in $(AM_V_at)-$(MKDIR_P) "$(@:%/conf.py=%)" - $(AM_V_GEN)sed \ - -e 's/%VERSION%/$(VERSION)/g' \ - -e 's/%BOOK_ID%/$(@:%/conf.py=%)/g' \ - -e 's/%BOOK_TITLE%/$(subst _, ,$(@:%/conf.py=%))/g' \ - -e 's#%SRC_DIR%#$(abs_srcdir)#g' \ + $(AM_V_GEN)sed \ + -e 's/%VERSION%/$(VERSION)/g' \ + -e 's/%BOOK_ID%/$(@:%/conf.py=%)/g' \ + -e 's/%BOOK_TITLE%/$(subst _, ,$(@:%/conf.py=%))/g' \ + -e 's#%SRC_DIR%#$(abs_srcdir)#g' \ -e 's#%ABS_TOP_SRCDIR%#$(abs_top_srcdir)#g' \ -e 's#%CONFIGDIR%#@CONFIGDIR@#g' \ -e 's#%CRM_BLACKBOX_DIR%#@CRM_BLACKBOX_DIR@#g' \ + -e 's#%CRM_CONFIG_DIR%#@CRM_CONFIG_DIR@#g' \ -e 's#%CRM_DAEMON_GROUP%#@CRM_DAEMON_GROUP@#g' \ -e 's#%CRM_DAEMON_USER%#@CRM_DAEMON_USER@#g' \ -e 's#%CRM_LOG_DIR%#@CRM_LOG_DIR@#g' \ -e 's#%CRM_SCHEMA_DIRECTORY%#@CRM_SCHEMA_DIRECTORY@#g' \ -e 's#%PACEMAKER_CONFIG_DIR%#@PACEMAKER_CONFIG_DIR@#g' \ -e 's#%PCMK_GNUTLS_PRIORITIES%#@PCMK_GNUTLS_PRIORITIES@#g' \ + -e 's#%PCMK__REMOTE_SCHEMA_DIR%#@PCMK__REMOTE_SCHEMA_DIR@#g' \ $(<) > "$@" $(BOOK)/_build: $(STATIC_FILES) $(BOOK)/conf.py $(DEPS_$(BOOK)) $(wildcard $(srcdir)/$(BOOK)/*.rst) @@ -176,6 +178,15 @@ if BUILD_SPHINX_DOCS "$(RSYNC_DEST)/$(PACKAGE)/doc" endif +.PHONY: vars +vars: + @echo "BOOK_FORMATS='$(BOOK_FORMATS)'" + @echo "PAPER='$(PAPER)'" + @echo "SPHINXFLAGS='$(SPHINXFLAGS)'" + @echo "RSYNC_DEST='$(RSYNC_DEST)'" + @echo "VERSION='$(VERSION)'" + @echo "PACKAGE_SERIES='$(PACKAGE_SERIES)'" + .PHONY: all-local all-local: if BUILD_SPHINX_DOCS diff --git a/doc/sphinx/Pacemaker_Administration/agents.rst b/doc/sphinx/Pacemaker_Administration/agents.rst index e5b17e2..34bea60 100644 --- a/doc/sphinx/Pacemaker_Administration/agents.rst +++ b/doc/sphinx/Pacemaker_Administration/agents.rst @@ -53,123 +53,143 @@ _______ All OCF resource agents are required to implement the following actions. -.. table:: **Required Actions for OCF Agents** - - +--------------+-------------+------------------------------------------------+ - | Action | Description | Instructions | - +==============+=============+================================================+ - | start | Start the | .. index:: | - | | resource | single: OCF resource agent; start | - | | | single: start action | - | | | | - | | | Return 0 on success and an appropriate | - | | | error code otherwise. Must not report | - | | | success until the resource is fully | - | | | active. | - +--------------+-------------+------------------------------------------------+ - | stop | Stop the | .. index:: | - | | resource | single: OCF resource agent; stop | - | | | single: stop action | - | | | | - | | | Return 0 on success and an appropriate | - | | | error code otherwise. Must not report | - | | | success until the resource is fully | - | | | stopped. | - +--------------+-------------+------------------------------------------------+ - | monitor | Check the | .. index:: | - | | resource's | single: OCF resource agent; monitor | - | | state | single: monitor action | - | | | | - | | | Exit 0 if the resource is running, 7 | - | | | if it is stopped, and any other OCF | - | | | exit code if it is failed. NOTE: The | - | | | monitor script should test the state | - | | | of the resource on the local machine | - | | | only. | - +--------------+-------------+------------------------------------------------+ - | meta-data | Describe | .. 
index:: | - | | the | single: OCF resource agent; meta-data | - | | resource | single: meta-data action | - | | | | - | | | Provide information about this | - | | | resource in the XML format defined by | - | | | the OCF standard. Exit with 0. NOTE: | - | | | This is *not* required to be performed | - | | | as root. | - +--------------+-------------+------------------------------------------------+ +.. list-table:: **Required Actions for OCF Agents** + :class: longtable + :widths: 1 4 3 + :header-rows: 1 + + * - Action + - Description + - Instructions + * - .. _start_action: + + .. index:: + single: OCF resource agent; start + single: start action + + start + - Start the resource + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` on success and an appropriate + error code otherwise. Must not report success until the resource is fully + active. + * - .. _stop_action: + + .. index:: + single: OCF resource agent; stop + single: stop action + + stop + - Stop the resource + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` on success and an appropriate + error code otherwise. Must not report success until the resource is fully + stopped. + * - .. _monitor_action: + + .. index:: + single: OCF resource agent; monitor + single: monitor action + + monitor + - Check the resource's state + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` if the resource is running, + :ref:`OCF_NOT_RUNNING <OCF_NOT_RUNNING>` if it is stopped, and any other + :ref:`OCF exit code <ocf_return_codes>` if it is failed. **Note:** The + monitor action should test the state of the resource on the local machine + only. + * - .. _meta_data_action: + + .. index:: + single: OCF resource agent; meta-data + single: meta-data action + + meta-data + - Describe the resource + - Provide information about this resource in the XML format defined by the + OCF standard. Return :ref:`OCF_SUCCESS <OCF_SUCCESS>`. **Note:** This is + *not* required to be performed as root. OCF resource agents may optionally implement additional actions. Some are used only with advanced resource types such as clones. -.. table:: **Optional Actions for OCF Resource Agents** - - +--------------+-------------+------------------------------------------------+ - | Action | Description | Instructions | - +==============+=============+================================================+ - | validate-all | This should | .. index:: | - | | validate | single: OCF resource agent; validate-all | - | | the | single: validate-all action | - | | instance | | - | | parameters | Return 0 if parameters are valid, 2 if | - | | provided. | not valid, and 6 if resource is not | - | | | configured. | - +--------------+-------------+------------------------------------------------+ - | promote | Bring the | .. index:: | - | | local | single: OCF resource agent; promote | - | | instance of | single: promote action | - | | a promotable| | - | | clone | Return 0 on success | - | | resource to | | - | | the promoted| | - | | role. | | - +--------------+-------------+------------------------------------------------+ - | demote | Bring the | .. index:: | - | | local | single: OCF resource agent; demote | - | | instance of | single: demote action | - | | a promotable| | - | | clone | Return 0 on success | - | | resource to | | - | | the | | - | | unpromoted | | - | | role. | | - +--------------+-------------+------------------------------------------------+ - | notify | Used by the | .. index:: | - | | cluster to | single: OCF resource agent; notify | - | | send | single: notify action | - | | the agent | | - | | pre- and | Must not fail.
Must exit with 0 | - | | post- | | - | | notification| | - | | events | | - | | telling the | | - | | resource | | - | | what has | | - | | happened and| | - | | will happen.| | - +--------------+-------------+------------------------------------------------+ - | reload | Reload the | .. index:: | - | | service's | single: OCF resource agent; reload | - | | own | single: reload action | - | | config. | | - | | | Not used by Pacemaker | - +--------------+-------------+------------------------------------------------+ - | reload-agent | Make | .. index:: | - | | effective | single: OCF resource agent; reload-agent | - | | any changes | single: reload-agent action | - | | in instance | | - | | parameters | This is used when the agent can handle a | - | | marked as | change in some of its parameters more | - | | reloadable | efficiently than stopping and starting the | - | | in the | resource. | - | | agent's | | - | | meta-data. | | - +--------------+-------------+------------------------------------------------+ - | recover | Restart the | .. index:: | - | | service. | single: OCF resource agent; recover | - | | | single: recover action | - | | | | - | | | Not used by Pacemaker | - +--------------+-------------+------------------------------------------------+ +.. list-table:: **Optional Actions for OCF Resource Agents** + :class: longtable + :widths: 1 4 3 + :header-rows: 1 + + * - Action + - Description + - Instructions + * - .. _validate_all_action: + + .. index:: + single: OCF resource agent; validate-all + single: validate-all action + + validate-all + - Validate the instance parameters provided. + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` if parameters are valid, + :ref:`OCF_ERR_ARGS <OCF_ERR_ARGS>` if not valid, and + :ref:`OCF_ERR_CONFIGURED <OCF_ERR_CONFIGURED>` if resource is not + configured. + * - .. _promote_action: + + .. index:: + single: OCF resource agent; promote + single: promote action + + promote + - Bring the local instance of a promotable clone resource to the promoted + role. + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` on success. + * - .. _demote_action: + + .. index:: + single: OCF resource agent; demote + single: demote action + + demote + - Bring the local instance of a promotable clone resource to the unpromoted + role. + - Return :ref:`OCF_SUCCESS <OCF_SUCCESS>` on success. + * - .. _notify_action: + + .. index:: + single: OCF resource agent; notify + single: notify action + + notify + - Used by the cluster to send the agent pre- and post-notification events + telling the resource what has happened and what will happen. + - Must not fail. Must return :ref:`OCF_SUCCESS <OCF_SUCCESS>`. + * - .. _reload_action: + + .. index:: + single: OCF resource agent; reload + single: reload action + + reload + - Reload the service's own configuration. + - Not used by Pacemaker. + * - .. _reload_agent_action: + + .. index:: + single: OCF resource agent; reload-agent + single: reload-agent action + + reload-agent + - Make effective any changes in instance parameters marked as reloadable in + the agent's meta-data. + - This is used when the agent can handle a change in some of its parameters + more efficiently than stopping and starting the resource. + * - .. _recover_action: + + .. index:: + single: OCF resource agent; recover + single: recover action + + recover + - Restart the service. + - Not used by Pacemaker. .. important:: @@ -180,159 +200,214 @@ only with advanced resource types such as clones. .. index:: single: OCF resource agent; return code -How are OCF Return Codes Interpreted? +How Are OCF Return Codes Interpreted?
_____________________________________ -The first thing the cluster does is to check the return code against -the expected result. If the result does not match the expected value, -then the operation is considered to have failed, and recovery action is -initiated. +The first thing the cluster does is to check the return code against the +expected result. If the result does not match the expected value, then the +operation is considered to have failed, and recovery action is initiated. There are three types of failure recovery: -.. table:: **Types of recovery performed by the cluster** - - +-------+--------------------------------------------+--------------------------------------+ - | Type | Description | Action Taken by the Cluster | - +=======+============================================+======================================+ - | soft | .. index:: | Restart the resource or move it to a | - | | single: OCF resource agent; soft error | new location | - | | | | - | | A transient error occurred | | - +-------+--------------------------------------------+--------------------------------------+ - | hard | .. index:: | Move the resource elsewhere and | - | | single: OCF resource agent; hard error | prevent it from being retried on the | - | | | current node | - | | A non-transient error that | | - | | may be specific to the | | - | | current node | | - +-------+--------------------------------------------+--------------------------------------+ - | fatal | .. index:: | Stop the resource and prevent it | - | | single: OCF resource agent; fatal error | from being started on any cluster | - | | | node | - | | A non-transient error that | | - | | will be common to all | | - | | cluster nodes (e.g. a bad | | - | | configuration was specified) | | - +-------+--------------------------------------------+--------------------------------------+ +.. list-table:: **Types of Recovery Performed by the Cluster** + :class: longtable + :widths: 1 5 5 + :header-rows: 1 + + * - Type + - Description + - Action Taken by the Cluster + * - .. _soft_error: + + .. index:: + single: OCF resource agent; soft error + + soft + - A transient error + - Restart the resource or move it to a new location + * - .. _hard_error: + + .. index:: + single: OCF resource agent; hard error + + hard + - A non-transient error that may be specific to the current node + - Move the resource elsewhere and prevent it from being retried on the + current node + * - .. _fatal_error: + + .. index:: + single: OCF resource agent; fatal error + + fatal + - A non-transient error that will be common to all cluster nodes (for + example, a bad configuration was specified) + - Stop the resource and prevent it from being started on any cluster node .. _ocf_return_codes: OCF Return Codes ________________ -The following table outlines the different OCF return codes and the type of +The following table outlines the various OCF return codes and the type of recovery the cluster will initiate when a failure code is received. Although -counterintuitive, even actions that return 0 (aka. ``OCF_SUCCESS``) can be -considered to have failed, if 0 was not the expected return value. - -.. table:: **OCF Exit Codes and their Recovery Types** - - +-------+-----------------------+---------------------------------------------------+----------+ - | Exit | OCF Alias | Description | Recovery | - | Code | | | | - +=======+=======================+===================================================+==========+ - | 0 | OCF_SUCCESS | .. 
index:: | soft | - | | | single: OCF_SUCCESS | | - | | | single: OCF return code; OCF_SUCCESS | | - | | | pair: OCF return code; 0 | | - | | | | | - | | | Success. The command completed successfully. | | - | | | This is the expected result for all start, | | - | | | stop, promote and demote commands. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 1 | OCF_ERR_GENERIC | .. index:: | soft | - | | | single: OCF_ERR_GENERIC | | - | | | single: OCF return code; OCF_ERR_GENERIC | | - | | | pair: OCF return code; 1 | | - | | | | | - | | | Generic "there was a problem" error code. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 2 | OCF_ERR_ARGS | .. index:: | hard | - | | | single: OCF_ERR_ARGS | | - | | | single: OCF return code; OCF_ERR_ARGS | | - | | | pair: OCF return code; 2 | | - | | | | | - | | | The resource's parameter values are not valid on | | - | | | this machine (for example, a value refers to a | | - | | | file not found on the local host). | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 3 | OCF_ERR_UNIMPLEMENTED | .. index:: | hard | - | | | single: OCF_ERR_UNIMPLEMENTED | | - | | | single: OCF return code; OCF_ERR_UNIMPLEMENTED | | - | | | pair: OCF return code; 3 | | - | | | | | - | | | The requested action is not implemented. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 4 | OCF_ERR_PERM | .. index:: | hard | - | | | single: OCF_ERR_PERM | | - | | | single: OCF return code; OCF_ERR_PERM | | - | | | pair: OCF return code; 4 | | - | | | | | - | | | The resource agent does not have | | - | | | sufficient privileges to complete the task. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 5 | OCF_ERR_INSTALLED | .. index:: | hard | - | | | single: OCF_ERR_INSTALLED | | - | | | single: OCF return code; OCF_ERR_INSTALLED | | - | | | pair: OCF return code; 5 | | - | | | | | - | | | The tools required by the resource are | | - | | | not installed on this machine. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 6 | OCF_ERR_CONFIGURED | .. index:: | fatal | - | | | single: OCF_ERR_CONFIGURED | | - | | | single: OCF return code; OCF_ERR_CONFIGURED | | - | | | pair: OCF return code; 6 | | - | | | | | - | | | The resource's parameter values are inherently | | - | | | invalid (for example, a required parameter was | | - | | | not given). | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 7 | OCF_NOT_RUNNING | .. index:: | N/A | - | | | single: OCF_NOT_RUNNING | | - | | | single: OCF return code; OCF_NOT_RUNNING | | - | | | pair: OCF return code; 7 | | - | | | | | - | | | The resource is safely stopped. This should only | | - | | | be returned by monitor actions, not stop actions. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 8 | OCF_RUNNING_PROMOTED | .. index:: | soft | - | | | single: OCF_RUNNING_PROMOTED | | - | | | single: OCF return code; OCF_RUNNING_PROMOTED | | - | | | pair: OCF return code; 8 | | - | | | | | - | | | The resource is running in the promoted role. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 9 | OCF_FAILED_PROMOTED | .. 
index:: | soft | - | | | single: OCF_FAILED_PROMOTED | | - | | | single: OCF return code; OCF_FAILED_PROMOTED | | - | | | pair: OCF return code; 9 | | - | | | | | - | | | The resource is (or might be) in the promoted | | - | | | role but has failed. The resource will be | | - | | | demoted, stopped and then started (and possibly | | - | | | promoted) again. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 190 | OCF_DEGRADED | .. index:: | none | - | | | single: OCF_DEGRADED | | - | | | single: OCF return code; OCF_DEGRADED | | - | | | pair: OCF return code; 190 | | - | | | | | - | | | The resource is properly active, but in such a | | - | | | condition that future failures are more likely. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | 191 | OCF_DEGRADED_PROMOTED | .. index:: | none | - | | | single: OCF_DEGRADED_PROMOTED | | - | | | single: OCF return code; OCF_DEGRADED_PROMOTED | | - | | | pair: OCF return code; 191 | | - | | | | | - | | | The resource is properly active in the promoted | | - | | | role, but in such a condition that future | | - | | | failures are more likely. | | - +-------+-----------------------+---------------------------------------------------+----------+ - | other | *none* | Custom error code. | soft | - +-------+-----------------------+---------------------------------------------------+----------+ +counterintuitive, even actions that return ``OCF_SUCCESS`` can be considered to +have failed, if ``OCF_SUCCESS`` was not the expected return value. + +.. list-table:: **OCF Exit Codes and Their Recovery Types** + :class: longtable + :widths: 1 3 6 2 + :header-rows: 1 + + * - Exit Code + - OCF Alias + - Description + - Recovery + * - .. _OCF_SUCCESS: + + .. index:: + single: OCF_SUCCESS + single: OCF return code; OCF_SUCCESS + pair: OCF return code; 0 + + 0 + - OCF_SUCCESS + - Success. The command completed successfully. This is the expected result + for all start, stop, promote, and demote actions. + - :ref:`soft ` + * - .. _OCF_ERR_GENERIC: + + .. index:: + single: OCF_ERR_GENERIC + single: OCF return code; OCF_ERR_GENERIC + pair: OCF return code; 1 + + 1 + - OCF_ERR_GENERIC + - Generic "there was a problem" error code. + - :ref:`hard ` + * - .. _OCF_ERR_ARGS: + + .. index:: + single: OCF_ERR_ARGS + single: OCF return code; OCF_ERR_ARGS + pair: OCF return code; 2 + + 2 + - OCF_ERR_ARGS + - The resource's parameter values are not valid on this machine (for + example, a value refers to a file not found on the local host). + - :ref:`hard ` + * - .. _OCF_ERR_UNIMPLEMENTED: + + .. index:: + single: OCF_ERR_UNIMPLEMENTED + single: OCF return code; OCF_ERR_UNIMPLEMENTED + pair: OCF return code; 3 + + 3 + - OCF_ERR_UNIMPLEMENTED + - The requested action is not implemented. + - :ref:`hard ` + * - .. _OCF_ERR_PERM: + + .. index:: + single: OCF_ERR_PERM + single: OCF return code; OCF_ERR_PERM + pair: OCF return code; 4 + + 4 + - OCF_ERR_PERM + - The resource agent does not have sufficient privileges to complete the + task. + - :ref:`hard ` + * - .. _OCF_ERR_INSTALLED: + + .. index:: + single: OCF_ERR_INSTALLED + single: OCF return code; OCF_ERR_INSTALLED + pair: OCF return code; 5 + + 5 + - OCF_ERR_INSTALLED + - The tools required by the resource are not installed on this machine. + - :ref:`hard ` + * - .. _OCF_ERR_CONFIGURED: + + .. 
index:: + single: OCF_ERR_CONFIGURED + single: OCF return code; OCF_ERR_CONFIGURED + pair: OCF return code; 6 + + 6 + - OCF_ERR_CONFIGURED + - The resource's parameter values are inherently invalid (for example, a + required parameter was not given). + - :ref:`fatal ` + * - .. _OCF_NOT_RUNNING: + + .. index:: + single: OCF_NOT_RUNNING + single: OCF return code; OCF_NOT_RUNNING + pair: OCF return code; 7 + + 7 + - OCF_NOT_RUNNING + - The resource is safely stopped. This should only be returned by monitor + actions, not stop actions. + - N/A + * - .. _OCF_RUNNING_PROMOTED: + + .. index:: + single: OCF_RUNNING_PROMOTED + single: OCF return code; OCF_RUNNING_PROMOTED + pair: OCF return code; 8 + + 8 + - OCF_RUNNING_PROMOTED + - The resource is running in the promoted role. + - :ref:`soft ` + * - .. _OCF_FAILED_PROMOTED: + + .. index:: + single: OCF_FAILED_PROMOTED + single: OCF return code; OCF_FAILED_PROMOTED + pair: OCF return code; 9 + + 9 + - OCF_FAILED_PROMOTED + - The resource is (or might be) in the promoted role but has failed. The + resource will be demoted, stopped, and then started (and possibly + promoted) again. + - :ref:`soft ` + * - .. _OCF_DEGRADED: + + .. index:: + single: OCF_DEGRADED + single: OCF return code; OCF_DEGRADED + pair: OCF return code; 190 + + 190 + - OCF_DEGRADED + - The resource is properly active, but in such a condition that future + failures are more likely. + - none + * - .. _OCF_DEGRADED_PROMOTED: + + .. index:: + single: OCF_DEGRADED_PROMOTED + single: OCF return code; OCF_DEGRADED_PROMOTED + pair: OCF return code; 191 + + 191 + - OCF_DEGRADED_PROMOTED + - The resource is properly active in the promoted role, but in such a + condition that future failures are more likely. + - none + * - other + - *none* + - Custom error code. + - soft Exceptions to the recovery handling described above: @@ -347,6 +422,670 @@ Exceptions to the recovery handling described above: if they had returned success, but status output will indicate that the resource is degraded. +.. _ocf_env_vars: + +Environment Variables +_____________________ + +Pacemaker sets certain environment variables when it executes an OCF resource +agent. Agents can check these variables to get information about resource +parameters or the execution environment. + +**Note:** Pacemaker may set other environment variables for its own purposes. +They may be present in the agent's environment, but Pacemaker is not providing +them for the agent's use, and so the agent should not rely on any variables not +listed in the table below. + +.. list-table:: **OCF Environment Variables** + :class: longtable + :widths: 1 6 + :header-rows: 1 + + * - Environment Variable + - Description + * - .. _OCF_CHECK_LEVEL: + + .. index:: + single: OCF_CHECK_LEVEL + single: environment variable; OCF_CHECK_LEVEL + + OCF_CHECK_LEVEL + - Requested intensity level of checks in ``monitor`` and ``validate-all`` + actions. Usually set as an operation attribute; see Pacemaker Explained + for an example. + * - .. _OCF_EXIT_REASON_PREFIX: + + .. index:: + single: OCF_EXIT_REASON_PREFIX + single: environment variable; OCF_EXIT_REASON_PREFIX + + OCF_EXIT_REASON_PREFIX + - Prefix for printing fatal error messages from the resource agent. + * - .. _OCF_RA_VERSION_MAJOR: + + .. index:: + single: OCF_RA_VERSION_MAJOR + single: environment variable; OCF_RA_VERSION_MAJOR + + OCF_RA_VERSION_MAJOR + - Major version number of the OCF Resource Agent API. If the script does + not support this revision, it should report an error. 
+ See the `OCF specification `_ for an + explanation of the versioning scheme used. The version number is split + into two numbers for ease of use in shell scripts. These two may be used + by the agent to determine whether it is run under an OCF-compliant + resource manager. + * - .. _OCF_RA_VERSION_MINOR: + + .. index:: + single: OCF_RA_VERSION_MINOR + single: environment variable; OCF_RA_VERSION_MINOR + + OCF_RA_VERSION_MINOR + - Minor version number of the OCF Resource Agent API. See + :ref:`OCF_RA_VERSION_MAJOR ` for more details. + * - .. _OCF_RESKEY_crm_feature_set: + + .. index:: + single: OCF_RESKEY_crm_feature_set + single: environment variable; OCF_RESKEY_crm_feature_set + + OCF_RESKEY_crm_feature_set + - ``crm_feature_set`` on the DC (or on the local node, if the agent is run + by ``crm_resource``). + * - .. _OCF_RESKEY_CRM_meta_interval: + + .. index:: + single: OCF_RESKEY_CRM_meta_interval + single: environment variable; OCF_RESKEY_CRM_meta_interval + + OCF_RESKEY_CRM_meta_interval + - Interval (in milliseconds) of the current operation. + * - .. _OCF_RESKEY_CRM_meta_name: + + .. index:: + single: OCF_RESKEY_CRM_meta_name + single: environment variable; OCF_RESKEY_CRM_meta_name + + OCF_RESKEY_CRM_meta_name + - Name of the current operation. + * - .. _OCF_RESKEY_CRM_meta_notify: + + .. index:: + single: OCF_RESKEY_CRM_meta_notify_* + single: environment variable; OCF_RESKEY_CRM_meta_notify_* + + OCF_RESKEY_CRM_meta_notify_* + - See :ref:`Clone Notifications `. + * - .. _OCF_RESKEY_CRM_meta_on_node: + + .. index:: + single: OCF_RESKEY_CRM_meta_on_node + single: environment variable; OCF_RESKEY_CRM_meta_on_node + + OCF_RESKEY_CRM_meta_on_node + - Name of the node where the current operation is running. + * - .. _OCF_RESKEY_CRM_meta_on_node_uuid: + + .. index:: + single: OCF_RESKEY_CRM_meta_on_node_uuid + single: environment variable; OCF_RESKEY_CRM_meta_on_node_uuid + + OCF_RESKEY_CRM_meta_on_node_uuid + - Cluster-layer ID of the node where the current operation is running (or + node name for Pacemaker Remote nodes). + * - .. _OCF_RESKEY_CRM_meta_physical_host: + + .. index:: + single: OCF_RESKEY_CRM_meta_physical_host + single: environment variable; OCF_RESKEY_CRM_meta_physical_host + + OCF_RESKEY_CRM_meta_physical_host + - If the node where the current operation is running is a guest node, the + host on which the container is running. + * - .. _OCF_RESKEY_CRM_meta_timeout: + + .. index:: + single: OCF_RESKEY_CRM_meta_timeout + single: environment variable; OCF_RESKEY_CRM_meta_timeout + + OCF_RESKEY_CRM_meta_timeout + - Timeout (in milliseconds) of the current operation. + * - .. _OCF_RESKEY_CRM_meta: + + .. index:: + single: OCF_RESKEY_CRM_meta_* + single: environment variable; OCF_RESKEY_CRM_meta_* + + OCF_RESKEY_CRM_meta_* + - Each of a resource's meta-attributes is converted to an environment + variable prefixed with "OCF_RESKEY_CRM_meta\_". See Pacemaker Explained + for some meta-attributes that have special meaning to Pacemaker. + * - .. _OCF_RESKEY: + + .. index:: + single: OCF_RESKEY_* + single: environment variable; OCF_RESKEY_* + + OCF_RESKEY_* + - Each of a resource's instance parameters is converted to an environment + variable prefixed with "OCF_RESKEY\_". + * - .. _OCF_RESOURCE_INSTANCE: + + .. index:: + single: OCF_RESOURCE_INSTANCE + single: environment variable; OCF_RESOURCE_INSTANCE + + OCF_RESOURCE_INSTANCE + - The name of the resource instance. + * - .. _OCF_RESOURCE_PROVIDER: + + .. 
index:: + single: OCF_RESOURCE_PROVIDER + single: environment variable; OCF_RESOURCE_PROVIDER + + OCF_RESOURCE_PROVIDER + - The name of the resource agent provider. + * - .. _OCF_RESOURCE_TYPE: + + .. index:: + single: OCF_RESOURCE_TYPE + single: environment variable; OCF_RESOURCE_TYPE + + OCF_RESOURCE_TYPE + - The name of the resource type. + * - .. _OCF_ROOT: + + .. index:: + single: OCF_ROOT + single: environment variable; OCF_ROOT + + OCF_ROOT + - The root of the OCF directory hierarchy. + * - .. _OCF_TRACE_FILE: + + .. index:: + single: OCF_TRACE_FILE + single: environment variable; OCF_TRACE_FILE + + OCF_TRACE_FILE + - The absolute path or file descriptor to write trace output to, if + ``OCF_TRACE_RA`` is set to true. Pacemaker sets this only to + ``/dev/stderr`` and only when running a resource agent via + ``crm_resource``. + * - .. _OCF_TRACE_RA: + + .. index:: + single: OCF_TRACE_RA + single: environment variable; OCF_TRACE_RA + + OCF_TRACE_RA + - If set to true, enable tracing of the resource agent. Trace output is + written to ``OCF_TRACE_FILE`` if set; otherwise, it's written to a file + in ``OCF_RESKEY_trace_dir`` if set or in a default directory if not. + Pacemaker sets this to true only when running a resource agent via + ``crm_resource`` with one or more ``-V`` flags. + * - .. _PCMK_DEBUGLOG: + .. _HA_DEBUGLOG: + + .. index:: + single: PCMK_DEBUGLOG + single: environment variable; PCMK_DEBUGLOG + single: HA_DEBUGLOG + single: environment variable; HA_DEBUGLOG + + PCMK_DEBUGLOG (and HA_DEBUGLOG) + - Where to write resource agent debug logs. Pacemaker sets this to + ``PCMK_logfile`` if set to a value other than ``none`` and if debugging + is enabled for the executor. + * - .. _PCMK_LOGFACILITY: + .. _HA_LOGFACILITY: + + .. index:: + single: PCMK_LOGFACILITY + single: environment variable; PCMK_LOGFACILITY + single: HA_LOGFACILITY + single: environment variable; HA_LOGFACILITY + + PCMK_LOGFACILITY (and HA_LOGFACILITY) + - Syslog facility for resource agent logs. Pacemaker sets this to + ``PCMK_logfacility`` if set to a value other than ``none`` or + ``/dev/null``. + * - .. _PCMK_LOGFILE: + .. _HA_LOGFILE:: + + .. index:: + single: PCMK_LOGFILE: + single: environment variable; PCMK_LOGFILE: + single: HA_LOGFILE: + single: environment variable; HA_LOGFILE: + + PCMK_LOGFILE (and HA_LOGFILE) + - Where to write resource agent logs. Pacemaker sets this to + ``PCMK_logfile`` if set to a value other than ``none``. + * - .. _PCMK_service: + + .. index:: + single: PCMK_service + single: environment variable; PCMK_service + + PCMK_service + - The name of the Pacemaker subsystem or command-line tool that's executing + the resource agent. Specific values are subject to change; useful mainly + for logging. + +Clone Resource Agent Requirements +_________________________________ + +Any resource can be used as an anonymous clone, as it requires no additional +support from the resource agent. Whether it makes sense to do so depends on your +resource and its resource agent. + +Resource Agent Requirements for Globally Unique Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Globally unique clones require additional support in the resource agent. In +particular, it must respond with ``OCF_SUCCESS`` only if the node has that exact +instance active. All other probes for instances of the clone should result in +``OCF_NOT_RUNNING`` (or one of the other OCF error codes if they are failed). 
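As a minimal sketch of such a probe, assume a hypothetical agent that records
each active instance in an instance-specific PID file (the file layout and
agent name are illustrations, not anything defined by Pacemaker or OCF):

.. code-block:: sh

   # Hypothetical monitor action for a globally unique clone. The exit codes
   # are the standard OCF values described in the table earlier.
   OCF_SUCCESS=0
   OCF_NOT_RUNNING=7

   monitor() {
       # OCF_RESOURCE_INSTANCE is set by Pacemaker and includes the instance
       # number (for example, "myclone:2")
       pidfile="/run/myagent/${OCF_RESOURCE_INSTANCE}.pid"

       # Succeed only if this exact instance is active on this node; a probe
       # for any other instance finds no matching PID file and reports
       # OCF_NOT_RUNNING instead
       if [ -f "$pidfile" ] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
           return "$OCF_SUCCESS"
       fi
       return "$OCF_NOT_RUNNING"
   }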
+ +Individual instances of a clone are identified by appending a colon and a +numerical offset (for example, ``apache:2``). + +A resource agent can find out how many copies there are by examining the +``OCF_RESKEY_CRM_meta_clone_max`` environment variable and which instance it is +by examining ``OCF_RESKEY_CRM_meta_clone``. + +The resource agent must not make any assumptions (based on +``OCF_RESKEY_CRM_meta_clone``) about which numerical instances are active. In +particular, the list of active copies is not always an unbroken sequence, nor +does it always start at 0. + +Resource Agent Requirements for Promotable Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Promotable clone resources require two extra actions, ``demote`` and ``promote``, +which are responsible for changing the state of the resource. Like ``start`` and +``stop``, they should return ``OCF_SUCCESS`` if they completed successfully or a +relevant error code if they did not. + +The states can mean whatever you wish, but when the resource is started, it must +begin in the unpromoted role. From there, the cluster will decide which +instances to promote. + +In addition to the clone requirements for monitor actions, agents must also +*accurately* report which state they are in. The cluster relies on the agent to +report its status (including role) accurately and does not indicate to the agent +what role it currently believes it to be in. + +.. list-table:: **Role Implications of OCF Return Codes** + :class: longtable + :widths: 1 3 + :header-rows: 1 + + * - Monitor Return Code + - Description + * - :ref:`OCF_NOT_RUNNING ` + - .. index:: + single: OCF_NOT_RUNNING + single: OCF return code; OCF_NOT_RUNNING + + Stopped + * - :ref:`OCF_SUCCESS ` + - .. index:: + single: OCF_SUCCESS + single: OCF return code; OCF_SUCCESS + + Running (Unpromoted) + * - :ref:`OCF_RUNNING_PROMOTED ` + - .. index:: + single: OCF_RUNNING_PROMOTED + single: OCF return code; OCF_RUNNING_PROMOTED + + Running (Promoted) + * - :ref:`OCF_FAILED_PROMOTED ` + - .. index:: + single: OCF_FAILED_PROMOTED + single: OCF return code; OCF_FAILED_PROMOTED + + Failed (Promoted) + * - Other + - Failed (Unpromoted) + +.. _clone_notifications: + +Clone Notifications +~~~~~~~~~~~~~~~~~~~ + +If the clone has the ``notify`` meta-attribute set to ``true`` and the resource +agent supports the ``notify`` action, Pacemaker will call the action when +appropriate, passing a number of extra variables. These variables, when combined +with additional context, can be used to calculate the current state of the +cluster and what is about to happen to it. + +.. index:: + single: clone; environment variables + single: notify; environment variables + +.. list-table:: **Environment Variables Supplied with Clone Notify Actions** + :class: longtable + :widths: 1 1 + :header-rows: 1 + + * - Variable + - Description + * - .. _OCF_RESKEY_CRM_meta_notify_type: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_type + single: OCF_RESKEY_CRM_meta_notify_type + + OCF_RESKEY_CRM_meta_notify_type + - Allowed values: ``pre``, ``post`` + * - .. _OCF_RESKEY_CRM_meta_notify_operation: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_operation + single: OCF_RESKEY_CRM_meta_notify_operation + + OCF_RESKEY_CRM_meta_notify_operation + - Allowed values: ``start``, ``stop`` + * - .. _OCF_RESKEY_CRM_meta_notify_start_resource: + + .. 
index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_start_resource + single: OCF_RESKEY_CRM_meta_notify_start_resource + + OCF_RESKEY_CRM_meta_notify_start_resource + - Resources to be started + * - .. _OCF_RESKEY_CRM_meta_notify_stop_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_resource + single: OCF_RESKEY_CRM_meta_notify_stop_resource + + OCF_RESKEY_CRM_meta_notify_stop_resource + - Resources to be stopped + * - .. _OCF_RESKEY_CRM_meta_notify_active_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_active_resource + single: OCF_RESKEY_CRM_meta_notify_active_resource + + OCF_RESKEY_CRM_meta_notify_active_resource + - Resources that are running + * - .. _OCF_RESKEY_CRM_meta_notify_inactive_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_inactive_resource + single: OCF_RESKEY_CRM_meta_notify_inactive_resource + + OCF_RESKEY_CRM_meta_notify_inactive_resource + - Resources that are not running + * - .. _OCF_RESKEY_CRM_meta_notify_start_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_start_uname + single: OCF_RESKEY_CRM_meta_notify_start_uname + + OCF_RESKEY_CRM_meta_notify_start_uname + - Nodes on which resources will be started + * - .. _OCF_RESKEY_CRM_meta_notify_stop_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_uname + single: OCF_RESKEY_CRM_meta_notify_stop_uname + + OCF_RESKEY_CRM_meta_notify_stop_uname + - Nodes on which resources will be stopped + * - .. _OCF_RESKEY_CRM_meta_notify_active_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_active_uname + single: OCF_RESKEY_CRM_meta_notify_active_uname + + OCF_RESKEY_CRM_meta_notify_active_uname + - Nodes on which resources are running + +The variables come in pairs, such as +``OCF_RESKEY_CRM_meta_notify_start_resource`` and +``OCF_RESKEY_CRM_meta_notify_start_uname``, and should be treated as an array of +whitespace-separated elements. + +``OCF_RESKEY_CRM_meta_notify_inactive_resource`` is an exception, as the +matching ``uname`` variable does not exist since inactive resources are not +running on any node. + +Thus, in order to indicate that ``clone:0`` will be started on ``sles-1``, +``clone:2`` will be started on ``sles-3``, and ``clone:3`` will be started +on ``sles-2``, the cluster would set: + +.. topic:: Notification Variables + + .. code-block:: none + + OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" + OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" + +.. note:: + + Pacemaker will log but otherwise ignore failures of notify actions. 
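As an illustration, a hypothetical notify action could walk such a pair of
variables in lockstep using the shell's positional parameters (the ``echo`` is
a placeholder for whatever the agent actually needs to do):

.. code-block:: sh

   # Hypothetical notify action: report which instance will be started on
   # which node. The *_resource and *_uname variables are parallel,
   # whitespace-separated lists supplied by Pacemaker.
   notify() {
       set -- $OCF_RESKEY_CRM_meta_notify_start_uname
       for rsc in $OCF_RESKEY_CRM_meta_notify_start_resource; do
           echo "$rsc will be started on $1"
           shift
       done
       return 0   # notify failures are logged but otherwise ignored anyway
   }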
+ +Interpretation of Notification Variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Pre-notification (stop):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (stop) / Pre-notification (start):** + +* Active resources + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Inactive resources + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (start):** + +* Active resources: + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Inactive resources: + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +Extra Notifications for Promotable Clones +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: + single: clone; environment variables + single: promotable; environment variables + +.. list-table:: **Extra Environment Variables Supplied for Promotable Clones** + :class: longtable + :widths: 1 1 + :header-rows: 1 + + * - Variable + - Description + * - .. _OCF_RESKEY_CRM_meta_notify_promoted_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_resource + single: OCF_RESKEY_CRM_meta_notify_promoted_resource + + OCF_RESKEY_CRM_meta_notify_promoted_resource + - Resources that are running in the promoted role + * - .. _OCF_RESKEY_CRM_meta_notify_unpromoted_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_resource + single: OCF_RESKEY_CRM_meta_notify_unpromoted_resource + + OCF_RESKEY_CRM_meta_notify_unpromoted_resource + - Resources that are running in the unpromoted role + * - .. _OCF_RESKEY_CRM_meta_notify_promote_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_resource + single: OCF_RESKEY_CRM_meta_notify_promote_resource + + OCF_RESKEY_CRM_meta_notify_promote_resource + - Resources to be promoted + * - .. _OCF_RESKEY_CRM_meta_notify_demote_resource: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_resource + single: OCF_RESKEY_CRM_meta_notify_demote_resource + + OCF_RESKEY_CRM_meta_notify_demote_resource + - Resources to be demoted + * - .. _OCF_RESKEY_CRM_meta_notify_promote_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_uname + single: OCF_RESKEY_CRM_meta_notify_promote_uname + + OCF_RESKEY_CRM_meta_notify_promote_uname + - Nodes on which resources will be promoted + * - .. _OCF_RESKEY_CRM_meta_notify_demote_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_uname + single: OCF_RESKEY_CRM_meta_notify_demote_uname + + OCF_RESKEY_CRM_meta_notify_demote_uname + - Nodes on which resources will be demoted + * - .. 
_OCF_RESKEY_CRM_meta_notify_promoted_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_uname + single: OCF_RESKEY_CRM_meta_notify_promoted_uname + + OCF_RESKEY_CRM_meta_notify_promoted_uname + - Nodes on which resources are running in the promoted role + * - .. _OCF_RESKEY_CRM_meta_notify_unpromoted_uname: + + .. index:: + single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_uname + single: OCF_RESKEY_CRM_meta_notify_unpromoted_uname + + OCF_RESKEY_CRM_meta_notify_unpromoted_uname + - Nodes on which resources are running in the unpromoted role + +Interpretation of Promotable Notification Variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Pre-notification (demote):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Promoted resources: ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` +* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (demote) / Pre-notification (stop):** + +* Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` +* Promoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` +* Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + +**Post-notification (stop) / Pre-notification (start)** + +* Active resources: + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Promoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Unpromoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Inactive resources: + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (start) / Pre-notification (promote)** + +* Active resources: + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Promoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus 
``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Unpromoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Inactive resources: + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + +**Post-notification (promote)** + +* Active resources: + * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Promoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Unpromoted resources: + * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Inactive resources: + * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` + * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` +* Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` +* Resources that were promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` +* Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` +* Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` + .. index:: single: resource agent; LSB diff --git a/doc/sphinx/Pacemaker_Administration/configuring.rst b/doc/sphinx/Pacemaker_Administration/configuring.rst index 295c96a..e4d70c4 100644 --- a/doc/sphinx/Pacemaker_Administration/configuring.rst +++ b/doc/sphinx/Pacemaker_Administration/configuring.rst @@ -186,53 +186,14 @@ Connecting from a Remote Machine Provided Pacemaker is installed on a machine, it is possible to connect to the cluster even if the machine itself is not in the same cluster. To do this, one -simply sets up a number of environment variables and runs the same commands as -when working on a cluster node. - -.. list-table:: **Environment Variables Used to Connect to Remote Instances of the CIB** - :class: longtable - :widths: 2 2 5 - :header-rows: 1 - - * - Environment Variable - - Default - - Description - * - .. index:: - single: CIB_user - single: environment variable; CIB_user - - CIB_user - - |CRM_DAEMON_USER_RAW| - - The user to connect as. Needs to be part of the |CRM_DAEMON_GROUP| group - on the target host. - * - .. 
index:: - single: CIB_passwd - single: environment variable; CIB_passwd - - CIB_passwd - - - - The user's password. Read from the command line if unset. - * - .. index:: - single: CIB_server - single: environment variable; CIB_server - - CIB_server - - localhost - - The host to contact - * - .. index:: - single: CIB_port - single: environment variable; CIB_port - - CIB_port - - - - The port on which to contact the server; required - * - .. index:: - single: CIB_encrypted - single: environment variable; CIB_encrypted - - CIB_encrypted - - true - - Whether to encrypt network traffic +simply sets the following environment variables and runs the same commands as +when working on a cluster node: + +* :ref:`CIB_port ` (required) +* :ref:`CIB_server ` +* :ref:`CIB_user ` +* :ref:`CIB_passwd ` +* :ref:`CIB_encrypted ` So, if **c001n01** is an active cluster node and is listening on port 1234 for connections, and **someuser** is a member of the |CRM_DAEMON_GROUP| group, diff --git a/doc/sphinx/Pacemaker_Administration/index.rst b/doc/sphinx/Pacemaker_Administration/index.rst index af89380..c8fd722 100644 --- a/doc/sphinx/Pacemaker_Administration/index.rst +++ b/doc/sphinx/Pacemaker_Administration/index.rst @@ -20,6 +20,7 @@ Table of Contents intro installing cluster + options configuring tools administrative diff --git a/doc/sphinx/Pacemaker_Administration/installing.rst b/doc/sphinx/Pacemaker_Administration/installing.rst index 44a3f5f..feea962 100644 --- a/doc/sphinx/Pacemaker_Administration/installing.rst +++ b/doc/sphinx/Pacemaker_Administration/installing.rst @@ -4,6 +4,6 @@ Installing Cluster Software .. index:: installation Most major Linux distributions have pacemaker packages in their standard -package repositories, or the software can be built from source code. -See the `Install wiki page `_ -for details. +package repositories, or the software can be built from source code. See +`How to Install `_ +on the ClusterLabs wiki for details. diff --git a/doc/sphinx/Pacemaker_Administration/options.rst b/doc/sphinx/Pacemaker_Administration/options.rst new file mode 100644 index 0000000..731d17f --- /dev/null +++ b/doc/sphinx/Pacemaker_Administration/options.rst @@ -0,0 +1,178 @@ +.. index:: client options + +Client Options +-------------- + +Pacemaker uses several environment variables set on the client side. + +.. note:: Directory and file paths below may differ on your system depending on + your Pacemaker build settings. Check your Pacemaker configuration + file to find the correct paths. + +.. list-table:: **Client-side Environment Variables** + :class: longtable + :widths: 2 4 5 + :header-rows: 1 + + * - Environment Variable + - Default + - Description + * - .. _CIB_encrypted: + + .. index:: + single: CIB_encrypted + single: environment variable; CIB_encrypted + + CIB_encrypted + - true + - Whether to encrypt network traffic. Used with :ref:`CIB_port ` + for connecting to a remote CIB instance; ignored if + :ref:`CIB_port ` is not set. + * - .. _CIB_file: + + .. index:: + single: CIB_file + single: environment variable; CIB_file + + CIB_file + - + - If set, CIB connections are created against the named XML file. Clients + read an input CIB from, and write the result CIB to, the named file. + Ignored if :ref:`CIB_shadow ` is set. + * - .. _CIB_passwd: + + .. index:: + single: CIB_passwd + single: environment variable; CIB_passwd + + CIB_passwd + - + - :ref:`$CIB_user `'s password. Read from the command line if + unset. 
Used with :ref:`CIB_port ` for connecting to a remote + CIB instance; ignored if :ref:`CIB_port ` is not set. + * - .. _CIB_port: + + .. index:: + single: CIB_port + single: environment variable; CIB_port + + CIB_port + - + - If set, CIB connections are created as clients to a remote CIB instance + on :ref:`$CIB_server ` via this port. Ignored if + :ref:`CIB_shadow ` or :ref:`CIB_file ` is set. + * - .. _CIB_server: + + .. index:: + single: CIB_server + single: environment variable; CIB_server + + CIB_server + - localhost + - The host to connect to. Used with :ref:`CIB_port ` for + connecting to a remote CIB instance; ignored if + :ref:`CIB_port ` is not set. + * - .. _CIB_shadow: + + .. index:: + single: CIB_shadow + single: environment variable; CIB_shadow + + CIB_shadow + - + - If set, CIB connections are created against a temporary working + ("shadow") CIB file called ``shadow.$CIB_shadow`` in + :ref:`$CIB_shadow_dir `. Should be set only to the name + of a shadow CIB created by :ref:`crm_shadow `. Otherwise, + behavior is undefined. + * - .. _CIB_shadow_dir: + + .. index:: + single: CIB_shadow_dir + single: environment variable; CIB_shadow_dir + + CIB_shadow_dir + - |CRM_CONFIG_DIR| if the current user is ``root`` or |CRM_DAEMON_USER|; + otherwise ``$HOME/.cib`` if :ref:`$HOME ` is set; otherwise + ``$TMPDIR/.cib`` if :ref:`$TMPDIR ` is set to an absolute path; + otherwise ``/tmp/.cib`` + - If set, shadow files are created in this directory. Ignored if + :ref:`CIB_shadow ` is not set. + * - .. _CIB_user: + + .. index:: + single: CIB_user + single: environment variable; CIB_user + + CIB_user + - |CRM_DAEMON_USER| if used with :ref:`CIB_port `, or the current + effective user otherwise + - If used with :ref:`CIB_port `, connect to + :ref:`$CIB_server ` as this user. Must be part of the + |CRM_DAEMON_GROUP| group on :ref:`$CIB_server `. Otherwise + (without :ref:`CIB_port `), this is used only for ACL and + display purposes. + * - .. _EDITOR: + + .. index:: + single: EDITOR + single: environment variable; EDITOR + + EDITOR + - + - Text editor to use for editing shadow files. Required for the ``--edit`` + command of :ref:`crm_shadow `. + * - .. _HOME: + + .. index:: + single: HOME + single: environment variable; HOME + + HOME + - Current user's home directory as configured in the passwd database, if an + entry exists + - Used to create a default :ref:`CIB_shadow_dir ` for non- + privileged users. + * - .. _PE_fail: + + .. index:: + single: PE_fail + single: environment variable; PE_fail + + PE_fail + - 0 + - Advanced use only: A dummy graph action with action ID matching this + option will be marked as failed. Primarily for developer use with + scheduler simulations. + * - .. _PS1: + + .. index:: + single: PS1 + single: environment variable; PS1 + + PS1 + - + - The shell's primary prompt string. Used by + :ref:`crm_shadow `: set to indicate that the user is in an + interactive shadow CIB session, and checked to determine whether the user + is already in an interactive session before creating a new one. + * - .. _SHELL: + + .. index:: + single: SHELL + single: environment variable; SHELL + + SHELL + - + - Absolute path to a shell. Used by :ref:`crm_shadow ` when + launching an interactive session. + * - .. _TMPDIR: + + .. index:: + single: TMPDIR + single: environment variable; TMPDIR + + TMPDIR + - /tmp + - Directory for temporary files. If not an absolute path, the default is + used instead. 
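As an illustration, the remote-CIB variables above are ordinary environment
variables, so a cluster can be queried from a machine outside it along these
lines (the host name, port, and user are placeholders; the port must match the
remote TLS port configured on the target node):

.. code-block:: sh

   # Run a Pacemaker command-line tool against a remote CIB instance
   export CIB_server=c001n01    # a cluster node accepting remote CIB clients
   export CIB_port=1234         # required; this enables the remote connection
   export CIB_user=someuser     # must be in the cluster daemon group there
   export CIB_encrypted=true    # the default, shown here for clarity

   cibadmin --query             # behaves as if run on the cluster node itself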
diff --git a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst index 3eda60a..06fb24f 100644 --- a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst +++ b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst @@ -4,7 +4,7 @@ Quick Comparison of pcs and crm shell ``pcs`` and ``crm shell`` are two popular higher-level command-line interfaces to Pacemaker. Each has its own syntax; this chapter gives a quick comparison of how to accomplish the same tasks using either one. Some examples also show the -equivalent command using low-level Pacmaker command-line tools. +equivalent command using low-level Pacemaker command-line tools. These examples show the simplest syntax; see the respective man pages for all possible options. @@ -118,6 +118,7 @@ Manage Resources .. topic:: Create a Resource .. code-block:: none + crmsh # crm configure primitive ClusterIP IPaddr2 params ip=192.168.122.120 cidr_netmask=24 pcs # pcs resource create ClusterIP IPaddr2 ip=192.168.122.120 cidr_netmask=24 diff --git a/doc/sphinx/Pacemaker_Administration/upgrading.rst b/doc/sphinx/Pacemaker_Administration/upgrading.rst index 1ca2a4e..bccfc22 100644 --- a/doc/sphinx/Pacemaker_Administration/upgrading.rst +++ b/doc/sphinx/Pacemaker_Administration/upgrading.rst @@ -159,11 +159,12 @@ Special considerations when planning a rolling upgrade: * If the Pacemaker Remote protocol version is changing, all cluster nodes should be upgraded before upgrading any Pacemaker Remote nodes. -See the ClusterLabs wiki's -`release calendar `_ -to figure out whether the CRM feature set and/or Pacemaker Remote protocol -version changed between the the Pacemaker release versions in your rolling -upgrade. +See the +`Pacemaker release calendar +`_ +on the ClusterLabs wiki to figure out whether the CRM feature set and/or +Pacemaker Remote protocol version changed between the Pacemaker release +versions in your rolling upgrade. To perform a rolling upgrade, on each node in turn: @@ -302,9 +303,8 @@ A more cautious approach would proceed like this: #. The transformation was successful but produced an invalid result. If the result of the transformation is invalid, you may see a number of - errors from the validation library. If these are not helpful, visit the - `Validation FAQ wiki page `_ - and/or try the manual upgrade procedure described below. + errors from the validation library. If these are not helpful, try the manual + upgrade procedure described below. #. Check the changes: @@ -398,9 +398,10 @@ the C API. Highlights: higher-level tools are strongly recommended to use instead of trying to parse the text output, which may change from release to release). -For a detailed list of changes, see the release notes and the -`Pacemaker 2.1 Changes `_ -page on the ClusterLabs wiki. +For a detailed list of changes, see the release notes and +`Pacemaker 2.1 Changes +`_ +on the ClusterLabs wiki. What Changed in 2.0 @@ -431,9 +432,10 @@ behavior. Highlights: * The public API for Pacemaker libraries that software applications can use has changed significantly. -For a detailed list of changes, see the release notes and the -`Pacemaker 2.0 Changes `_ -page on the ClusterLabs wiki. +For a detailed list of changes, see the release notes and +`Pacemaker 2.0 Changes +`_ +on the ClusterLabs wiki.
What Changed in 1.0 diff --git a/doc/sphinx/Pacemaker_Development/c.rst b/doc/sphinx/Pacemaker_Development/c.rst index b03ddae..8bc5e80 100644 --- a/doc/sphinx/Pacemaker_Development/c.rst +++ b/doc/sphinx/Pacemaker_Development/c.rst @@ -752,12 +752,35 @@ Function names should be unique across the entire project, to allow for individual tracing via ``PCMK_trace_functions``, and make it easier to search code and follow detail logs. -A common function signature is a comparison function that returns 0 if its -arguments are equal for sorting purposes, -1 if the first argument should sort -first, and 1 is the second argument should sort first. Such a function should -have ``cmp`` in its name, to parallel ``strcmp()``; ``sort`` should only be -used in the names of functions that sort an entire list (typically using a -``cmp`` function). +.. _sort_func: + +Sorting +^^^^^^^ + +A function that sorts an entire list should have ``sort`` in its name. It sorts +elements using a :ref:`comparison <compare_func>` function, which may be either +hard-coded or passed as an argument. + +.. _compare_func: + +Comparison +^^^^^^^^^^ + +A comparison function for :ref:`sorting <sort_func>` should have ``cmp`` in its +name and should *not* have ``sort`` in its name. + +.. _constructor_func: + +Constructors +^^^^^^^^^^^^ + +A constructor creates a new dynamically allocated object. It may perform some +initialization procedure on the new object. + +* If the constructor always creates an independent object instance, its name + should include ``new``. +* If the constructor may add the new object to some existing object, its name + should include ``create``. Function Definitions @@ -832,6 +855,12 @@ messages and converting from one to another, can be found in Of course, functions may have return values that aren't success/failure indicators, such as a pointer, integer count, or bool. +:ref:`Comparison <compare_func>` functions should return + +* a negative integer if the first argument should sort first +* 0 if its arguments are equal for sorting purposes +* a positive integer if the second argument should sort first + Public API Functions ____________________ @@ -880,6 +909,30 @@ __________________________________ * The convenience macros ``pcmk__plural_s()`` and ``pcmk__plural_alt()`` are handy when logging a word that may be singular or plural. +Log Levels +__________ + +When to use each log level: + +* **critical:** fatal error (usually something that would make a daemon exit) +* **error:** failure of something that affects the cluster (such as a resource + action, fencing action, etc.) or daemon operation +* **warning:** minor, potential, or recoverable failures (such as something + only affecting a daemon client, or invalid configuration that can be left to + default) +* **notice:** important successful events (such as a node joining or leaving, + resource action results, or configuration changes) +* **info:** events that would be helpful with troubleshooting (such as status + section updates or elections) +* **debug:** information that would be helpful for debugging code or complex + problems +* **trace:** like debug but for very noisy or low-level stuff + +By default, critical through notice are logged to the system log and detail +log, info is logged to the detail log only, and debug and trace are not logged +(if enabled, they go to the detail log only).
+ + Logging _______ @@ -912,6 +965,34 @@ using libqb's "extended logging" feature: pcmk_rc_str(rc), rc, id); +Assertion Logging +_________________ + +``CRM_ASSERT(expr)`` + If ``expr`` is false, this will call ``crm_err()`` with a "Triggered + fatal assert" message (with details), then abort execution. This should be + used for logic errors that should be impossible (such as a NULL function + argument where not accepted) and environmental errors that can't be handled + gracefully (for example, memory allocation failures, though returning + ``ENOMEM`` is often better). + +``CRM_LOG_ASSERT(expr)`` + If ``expr`` is false, this will generally log a message without aborting. If + the log level is below trace, it just calls ``crm_err()`` with a "Triggered + assert" message (with details). If the log level is trace, and the caller is + a daemon, then it will fork a child process in which to dump core, as well as + logging the message. If the log level is trace, and the caller is not a + daemon, then it will behave like ``CRM_ASSERT()`` (i.e. log and abort). This + should be used for logic or protocol errors that require no special handling. + +``CRM_CHECK(expr, failed_action)`` + If ``expr`` is false, behave like ``CRM_LOG_ASSERT(expr)`` (that is, log a + message and dump core if requested) then perform ``failed_action`` (which + must not contain ``continue``, ``break``, or ``errno``). This should be used + for logic or protocol errors that can be handled, usually by returning an + error status. + + Output ______ @@ -924,12 +1005,40 @@ A custom message can be defined with a unique string identifier, plus implementation functions for each supported format. The caller invokes the message using the identifier. The user selects the output format via ``--output-as``, and the output code automatically calls the appropriate -implementation function. +implementation function. Custom messages are useful for output that is more +complex than a one-line error or informational message, that must be +reproducible, and that should be handled automatically by the output formatting +system. Custom messages can contain other custom messages. + +A custom message function starts with the ``PCMK__OUTPUT_ARGS`` macro, whose +arguments are the message name followed by the message's arguments. Then comes +the function declaration, which takes a pointer to the current output object +and a variable argument list. + +To output a custom message, first register it: either call ``register_message``, +which registers a custom message at runtime, or use one of the predefined custom +messages in ``fmt_functions``, which is defined in +``lib/pacemaker/pcmk_output.c``. Then output the message by calling ``message``. + +Note: The ``fmt_functions`` functions accommodate all of the output formats; +the default implementation covers any format that isn't explicitly handled. The +default implementation produces valid output for any format, but you may still +want to implement a format-specific version (for example, XML, text, or HTML). +The ``message`` function automatically knows which implementation to use, +because the ``pcmk__output_s`` object contains this information. The interface (most importantly ``pcmk__output_t``) is declared in ``include/crm/common/output*h``. See the API comments and existing tools for -examples.
+Some of its important member functions are ``err``, which formats error messages +and ``info``, which formats informational messages. Also, ``list_item``, +which formats list items, ``begin_list``, which starts lists, and ``end_list``, +which ends lists, are important because lists can be useful, yet differently +handled by the different output types. .. index:: single: Makefile.am diff --git a/doc/sphinx/Pacemaker_Development/components.rst b/doc/sphinx/Pacemaker_Development/components.rst index 5086fa8..ce6b36b 100644 --- a/doc/sphinx/Pacemaker_Development/components.rst +++ b/doc/sphinx/Pacemaker_Development/components.rst @@ -27,10 +27,10 @@ As might be expected, it has the most code of any of the daemons. Join sequence _____________ -Most daemons track their cluster peers using Corosync's membership and CPG -only. The controller additionally requires peers to `join`, which ensures they -are ready to be assigned tasks. Joining proceeds through a series of phases -referred to as the `join sequence` or `join process`. +Most daemons track their cluster peers using Corosync's membership and +:term:`CPG` only. The controller additionally requires peers to `join`, which +ensures they are ready to be assigned tasks. Joining proceeds through a series +of phases referred to as the `join sequence` or `join process`. A node's current join phase is tracked by the ``join`` member of ``crm_node_t`` (used in the peer cache). It is an ``enum crm_join_phase`` that (ideally) @@ -141,7 +141,7 @@ _______________ The function calls for a fencing request go something like this: -The local fencer receives the client's request via an IPC or messaging +The local fencer receives the client's request via an :term:`IPC` or messaging layer callback, which calls * ``stonith_command()``, which (for requests) calls @@ -199,8 +199,8 @@ __________________ Each ``STONITH_OP_FENCE`` request goes something like this: -The chosen peer fencer receives the ``STONITH_OP_FENCE`` request via IPC or -messaging layer callback, which calls: +The chosen peer fencer receives the ``STONITH_OP_FENCE`` request via +:term:`IPC` or messaging layer callback, which calls: * ``stonith_command()``, which (for requests) calls @@ -240,7 +240,7 @@ returns, and calls Fencing replies _______________ -The original fencer receives the ``STONITH_OP_FENCE`` reply via IPC or +The original fencer receives the ``STONITH_OP_FENCE`` reply via :term:`IPC` or messaging layer callback, which calls: * ``stonith_command()``, which (for replies) calls @@ -295,10 +295,10 @@ The purpose of the scheduler is to take a CIB as input and generate a transition graph (list of actions that need to be taken) as output. The controller invokes the scheduler by contacting the scheduler daemon via -local IPC. Tools such as ``crm_simulate``, ``crm_mon``, and ``crm_resource`` -can also invoke the scheduler, but do so by calling the library functions -directly. This allows them to run using a ``CIB_file`` without the cluster -needing to be active. +local :term:`IPC`. Tools such as ``crm_simulate``, ``crm_mon``, and +``crm_resource`` can also invoke the scheduler, but do so by calling the +library functions directly. This allows them to run using a ``CIB_file`` +without the cluster needing to be active. The main entry point for the scheduler code is ``lib/pacemaker/pcmk_scheduler.c:pcmk__schedule_actions()``. It sets @@ -315,7 +315,7 @@ defaults and calls a series of functions for the scheduling. Some key steps: the CIB status section. 
This is used to decide whether certain actions need to be done, such as deleting orphan resources, forcing a restart when a resource definition changes, etc. -* ``assign_resources()`` assigns resources to nodes. +* ``assign_resources()`` :term:`assigns ` resources to nodes. * ``schedule_resource_actions()`` schedules resource-specific actions (which might or might not end up in the final graph). * ``pcmk__apply_orderings()`` processes ordering constraints in order to modify @@ -364,7 +364,7 @@ Resources _________ ``pcmk_resource_t`` is the data object representing cluster resources. A -resource has a variant: primitive (a.k.a. native), group, clone, or bundle. +resource has a variant: :term:`primitive`, group, clone, or :term:`bundle`. The resource object has members for two sets of methods, ``resource_object_functions_t`` from the ``libpe_status`` public API, and @@ -374,9 +374,9 @@ The resource object has members for two sets of methods, The object functions have basic capabilities such as unpacking the resource XML, and determining the current or planned location of the resource. -The assignment functions have more obscure capabilities needed for scheduling, -such as processing location and ordering constraints. For example, -``pcmk__create_internal_constraints()`` simply calls the +The :term:`assignment ` functions have more obscure capabilities needed +for scheduling, such as processing location and ordering constraints. For +example, ``pcmk__create_internal_constraints()`` simply calls the ``internal_constraints()`` method for each top-level resource in the cluster. .. index:: @@ -385,9 +385,10 @@ such as processing location and ordering constraints. For example, Nodes _____ -Assignment of resources to nodes is done by choosing the node with the highest -score for a given resource. The scheduler does a bunch of processing to -generate the scores, then the actual assignment is straightforward. +:term:`Assignment ` of resources to nodes is done by choosing the node +with the highest :term:`score` for a given resource. The scheduler does a bunch +of processing to generate the scores, then the actual assignment is +straightforward. Node lists are frequently used. For example, ``pcmk_scheduler_t`` has a ``nodes`` member which is a list of all nodes in the cluster, and @@ -435,8 +436,8 @@ ___________ Colocation constraints come into play in these parts of the scheduler code: -* When sorting resources for assignment, so resources with highest node score - are assigned first (see ``cmp_resources()``) +* When sorting resources for :term:`assignment `, so resources with + highest node :term:`score` are assigned first (see ``cmp_resources()``) * When updating node scores for resource assigment or promotion priority * When assigning resources, so any resources to be colocated with can be assigned first, and so colocations affect where the resource is assigned @@ -449,7 +450,8 @@ The resource assignment functions have several methods related to colocations: dependent's allowed node scores (if called while resources are being assigned) or the dependent's priority (if called while choosing promotable instance roles). It can behave differently depending on whether it is being - called as the primary's method or as the dependent's method. + called as the :term:`primary's ` method or as the :term:`dependent's + ` method. * ``add_colocated_node_scores():`` This updates a table of nodes for a given colocation attribute and score. 
It goes through colocations involving a given resource, and updates the scores of the nodes in the table with the best diff --git a/doc/sphinx/Pacemaker_Development/documentation.rst b/doc/sphinx/Pacemaker_Development/documentation.rst new file mode 100644 index 0000000..6880bb0 --- /dev/null +++ b/doc/sphinx/Pacemaker_Development/documentation.rst @@ -0,0 +1,35 @@ +.. index:: + pair: documentation; guidelines + +Documentation Guidelines +------------------------ + +See `doc/README.md +`_ in the +source code repository for the kinds of documentation that Pacemaker provides. + +Books +##### + +The ``doc/sphinx`` subdirectory has a subdirectory for each book by title. Each +book's directory contains .rst files, which are the chapter sources in +`reStructuredText +`_ format (with +index.rst as the starting point). + +Once you have edited the sources as desired, run ``make`` in the ``doc`` or +``doc/sphinx`` directory to generate all the books locally. You can view the +results by pointing your web browser to (replacing PATH\_TO\_CHECKOUT and +BOOK\_TITLE appropriately): + + file:///PATH_TO_CHECKOUT/doc/sphinx/BOOK_TITLE/_build/html/index.html + +See the comments at the top of ``doc/sphinx/Makefile.am`` for options you can +use. + +Recommended practices: + +* Use ``list-table`` instead of ``table`` for tables +* When documenting newly added features and syntax, add "\*(since X.Y.Z)\*" + with the version introducing them. These comments can be removed when rolling + upgrades from that version are no longer supported. diff --git a/doc/sphinx/Pacemaker_Development/faq.rst b/doc/sphinx/Pacemaker_Development/faq.rst index e738b7d..b1b1e5a 100644 --- a/doc/sphinx/Pacemaker_Development/faq.rst +++ b/doc/sphinx/Pacemaker_Development/faq.rst @@ -32,21 +32,20 @@ Frequently Asked Questions :Q: What are the different Git branches and repositories used for? :A: * The `main branch `_ - is the primary branch used for development. - * The `2.1 branch `_ is - the current release branch. Normally, it does not receive any changes, but - during the release cycle for a new release, it will contain release - candidates. During the release cycle, certain bug fixes will go to the - 2.1 branch first (and be pulled into main later). + is used for all new development. + * The `3.0 `_ and + `2.1 `_ branches are + for the currently supported major and minor version release series. + Normally, they do not receive any changes, but during the release cycle + for a new release, they will contain release candidates. The main branch + is pulled into 3.0 just before the first release candidate of a new + release, but otherwise, separate pull requests must be submitted to + backport changes from the main branch into a release branch. * The `2.0 branch `_, `1.1 branch `_, and separate `1.0 repository `_ are frozen snapshots of earlier release series, no longer being developed. - * Messages will be posted to the - `developers@ClusterLabs.org `_ - mailing list during the release cycle, with instructions about which - branches to use when submitting requests. ---- @@ -163,9 +162,5 @@ Frequently Asked Questions :Q: What if I still have questions? :A: Ask on the - `developers@ClusterLabs.org `_ - mailing list for development-related questions, or on the - `users@ClusterLabs.org `_ - mailing list for general questions about using Pacemaker. - Developers often also hang out on the - [ClusterLabs IRC channel](https://wiki.clusterlabs.org/wiki/ClusterLabs_IRC_channel). + `ClusterLabs mailing lists + `_. 
diff --git a/doc/sphinx/Pacemaker_Development/general.rst b/doc/sphinx/Pacemaker_Development/general.rst
index 9d9dcec..94015c9 100644
--- a/doc/sphinx/Pacemaker_Development/general.rst
+++ b/doc/sphinx/Pacemaker_Development/general.rst
@@ -38,3 +38,13 @@ may put more specific copyright notices in their commit messages if desired.
 `"Updating Copyright Notices"
 `_
 for a more readable summary.
+
+Terminology
+###########
+
+Pacemaker is extremely complex, and it helps to use terminology consistently
+throughout documentation, symbol names and comments in code, and so forth. It
+also helps to use natural language when practical instead of technical jargon
+and acronyms.
+
+For specific recommendations, see the :ref:`glossary`.
diff --git a/doc/sphinx/Pacemaker_Development/glossary.rst b/doc/sphinx/Pacemaker_Development/glossary.rst
new file mode 100644
index 0000000..6f73e96
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Development/glossary.rst
@@ -0,0 +1,84 @@
+.. index::
+   single: glossary
+
+.. _glossary:
+
+Glossary
+--------
+
+.. glossary::
+
+   assign
+      In the scheduler, this refers to associating a resource with a node. Do
+      not use *allocate* for this purpose.
+
+   bundle
+      The collective resource type associating instances of a container with
+      storage and networking. Do not use :term:`container` when referring to
+      the bundle as a whole.
+
+   cluster layer
+      The layer of the :term:`cluster stack` that provides membership and
+      messaging capabilities (such as Corosync).
+
+   cluster stack
+      The core components of a high-availability cluster: the
+      :term:`cluster layer` at the "bottom" of the stack, then Pacemaker, then
+      resource agents, and then the actual services managed by the cluster at
+      the "top" of the stack. Do not use *stack* for the cluster layer alone.
+
+   CPG
+      Corosync Process Group. This is the messaging layer in a Corosync-based
+      cluster. Pacemaker daemons use CPG to communicate with their counterparts
+      on other nodes.
+
+   container
+      This can mean either a container in the usual sense (whether as a
+      standalone resource or as part of a bundle), or the container resource
+      meta-attribute (which does not necessarily reference a container in the
+      usual sense).
+
+   dangling migration
+      Live migration of a resource consists of a **migrate_to** action on the
+      source node, followed by a **migrate_from** on the target node, followed
+      by a **stop** on the source node. If the **migrate_to** and
+      **migrate_from** have completed successfully, but the **stop** has not
+      yet been done, the migration is considered to be *dangling*.
+
+   dependent
+      In colocation constraints, this refers to the resource located relative
+      to the :term:`primary` resource. Do not use *rh* or *right-hand* for this
+      purpose.
+
+   IPC
+      Inter-process communication. In Pacemaker, clients send requests to
+      daemons using libqb IPC.
+
+   message
+      This can refer to log messages, custom messages defined for a
+      **pcmk_output_t** object, or XML messages sent via :term:`CPG` or
+      :term:`IPC`.
+
+   metadata
+      In the context of options and resource agents, this refers to OCF-style
+      metadata. Do not use a hyphen except when referring to the OCF-defined
+      action name *meta-data*.
+
+   primary
+      In colocation constraints, this refers to the resource that the
+      :term:`dependent` resource is located relative to. Do not use *lh* or
+      *left-hand* for this purpose.
+
+   primitive
+      The fundamental resource type in Pacemaker. Do not use *native* for this
+      purpose.
+
+   score
+      An integer value constrained between **-PCMK_SCORE_INFINITY** and
+      **+PCMK_SCORE_INFINITY**. Certain strings (such as
+      **PCMK_VALUE_INFINITY**) parse as particular score values. Do not use
+      *weight* for this purpose.
+
+   self-fencing
+      When a node is chosen to execute its own fencing. Do not use *suicide*
+      for this purpose.
diff --git a/doc/sphinx/Pacemaker_Development/index.rst b/doc/sphinx/Pacemaker_Development/index.rst
index cbe1499..a3f624f 100644
--- a/doc/sphinx/Pacemaker_Development/index.rst
+++ b/doc/sphinx/Pacemaker_Development/index.rst
@@ -20,11 +20,13 @@ Table of Contents
 
    faq
    general
+   documentation
    python
    c
    components
    helpers
    evolution
+   glossary
 
 Index
 -----
diff --git a/doc/sphinx/Pacemaker_Explained/alerts.rst b/doc/sphinx/Pacemaker_Explained/alerts.rst
index 1d02187..f4cad72 100644
--- a/doc/sphinx/Pacemaker_Explained/alerts.rst
+++ b/doc/sphinx/Pacemaker_Explained/alerts.rst
@@ -1,3 +1,5 @@
+.. _alerts:
+
 .. index::
    single: alert
    single: resource; alert
@@ -209,7 +211,28 @@ By default, an alert agent will be called for node events, fencing events, and
 resource events. An agent may choose to ignore certain types of events, but
 there is still the overhead of calling it for those events. To eliminate that
 overhead, you may select which types of events the agent should receive.
-
+
+Alert filters are configured within a ``select`` element inside an ``alert``
+element.
+
+.. list-table:: **Possible alert filters**
+   :class: longtable
+   :widths: 1 3
+   :header-rows: 1
+
+   * - Name
+     - Events alerted
+   * - select_nodes
+     - A node joins or leaves the cluster (whether at the cluster layer for
+       cluster nodes, or via a remote connection for Pacemaker Remote nodes).
+   * - select_fencing
+     - Fencing or unfencing of a node completes (whether successfully or not).
+   * - select_resources
+     - A resource action other than meta-data completes (whether successfully
+       or not).
+   * - select_attributes
+     - A transient attribute value update is sent to the CIB.
+
 .. topic:: Alert configuration to receive only node events and fencing events
 
    .. code-block:: xml
 
@@ -227,9 +250,6 @@ overhead, you may select which types of events the agent should receive.
 
 
 
-The possible options within ``