author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:45:40 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:45:40 +0000
commit     07d7f4cfa4b10de87a31b68191036ff446add675 (patch)
tree       7162524d8aaf1aef62d2f4fa51f595ed113981ff /lib
parent     Adding upstream version 2.1.6. (diff)
download   pacemaker-0d560556df519c6626cda7660f843a815b3c227e.tar.xz
           pacemaker-0d560556df519c6626cda7660f843a815b3c227e.zip

Adding upstream version 2.1.7. (upstream/2.1.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile.am | 18
-rw-r--r--  lib/cib/Makefile.am | 22
-rw-r--r--  lib/cib/cib_attrs.c | 19
-rw-r--r--  lib/cib/cib_client.c | 112
-rw-r--r--  lib/cib/cib_file.c | 477
-rw-r--r--  lib/cib/cib_native.c | 56
-rw-r--r--  lib/cib/cib_ops.c | 228
-rw-r--r--  lib/cib/cib_remote.c | 38
-rw-r--r--  lib/cib/cib_utils.c | 511
-rw-r--r--  lib/cluster/Makefile.am | 19
-rw-r--r--  lib/cluster/cluster.c | 33
-rw-r--r--  lib/cluster/cpg.c | 18
-rw-r--r--  lib/cluster/crmcluster_private.h | 6
-rw-r--r--  lib/cluster/membership.c | 85
-rw-r--r--  lib/common/Makefile.am | 33
-rw-r--r--  lib/common/acl.c | 41
-rw-r--r--  lib/common/actions.c (renamed from lib/common/operations.c) | 28
-rw-r--r--  lib/common/alerts.c | 87
-rw-r--r--  lib/common/cib.c | 23
-rw-r--r--  lib/common/crmcommon_private.h | 63
-rw-r--r--  lib/common/digest.c | 4
-rw-r--r--  lib/common/io.c | 8
-rw-r--r--  lib/common/ipc_attrd.c | 37
-rw-r--r--  lib/common/ipc_client.c | 461
-rw-r--r--  lib/common/ipc_common.c | 2
-rw-r--r--  lib/common/ipc_controld.c | 61
-rw-r--r--  lib/common/ipc_pacemakerd.c | 4
-rw-r--r--  lib/common/ipc_schedulerd.c | 4
-rw-r--r--  lib/common/ipc_server.c | 48
-rw-r--r--  lib/common/iso8601.c | 3
-rw-r--r--  lib/common/logging.c | 151
-rw-r--r--  lib/common/mainloop.c | 42
-rw-r--r--  lib/common/mock.c | 26
-rw-r--r--  lib/common/mock_private.h | 6
-rw-r--r--  lib/common/nvpair.c | 92
-rw-r--r--  lib/common/options.c | 19
-rw-r--r--  lib/common/output_html.c | 4
-rw-r--r--  lib/common/output_log.c | 130
-rw-r--r--  lib/common/output_xml.c | 20
-rw-r--r--  lib/common/patchset.c | 121
-rw-r--r--  lib/common/patchset_display.c | 26
-rw-r--r--  lib/common/remote.c | 39
-rw-r--r--  lib/common/results.c | 133
-rw-r--r--  lib/common/scheduler.c | 14
-rw-r--r--  lib/common/schemas.c | 149
-rw-r--r--  lib/common/strings.c | 16
-rw-r--r--  lib/common/tests/Makefile.am | 4
-rw-r--r--  lib/common/tests/acl/Makefile.am | 11
-rw-r--r--  lib/common/tests/actions/Makefile.am (renamed from lib/common/tests/operations/Makefile.am) | 16
-rw-r--r--  lib/common/tests/actions/copy_in_properties_test.c (renamed from lib/common/tests/operations/copy_in_properties_test.c) | 0
-rw-r--r--  lib/common/tests/actions/expand_plus_plus_test.c (renamed from lib/common/tests/operations/expand_plus_plus_test.c) | 0
-rw-r--r--  lib/common/tests/actions/fix_plus_plus_recursive_test.c (renamed from lib/common/tests/operations/fix_plus_plus_recursive_test.c) | 0
-rw-r--r--  lib/common/tests/actions/parse_op_key_test.c (renamed from lib/common/tests/operations/parse_op_key_test.c) | 0
-rw-r--r--  lib/common/tests/actions/pcmk_is_probe_test.c (renamed from lib/common/tests/operations/pcmk_is_probe_test.c) | 0
-rw-r--r--  lib/common/tests/actions/pcmk_xe_is_probe_test.c (renamed from lib/common/tests/operations/pcmk_xe_is_probe_test.c) | 0
-rw-r--r--  lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c (renamed from lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c) | 0
-rw-r--r--  lib/common/tests/agents/Makefile.am | 12
-rw-r--r--  lib/common/tests/agents/crm_parse_agent_spec_test.c | 18
-rw-r--r--  lib/common/tests/cmdline/Makefile.am | 5
-rw-r--r--  lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c | 13
-rw-r--r--  lib/common/tests/cmdline/pcmk__new_common_args_test.c | 62
-rw-r--r--  lib/common/tests/flags/Makefile.am | 11
-rw-r--r--  lib/common/tests/io/Makefile.am | 7
-rw-r--r--  lib/common/tests/lists/Makefile.am | 9
-rw-r--r--  lib/common/tests/nvpair/Makefile.am | 8
-rw-r--r--  lib/common/tests/options/Makefile.am | 9
-rw-r--r--  lib/common/tests/options/pcmk__set_env_option_test.c | 57
-rw-r--r--  lib/common/tests/output/Makefile.am | 20
-rw-r--r--  lib/common/tests/output/pcmk__output_new_test.c | 8
-rw-r--r--  lib/common/tests/results/Makefile.am | 4
-rw-r--r--  lib/common/tests/results/pcmk__results_test.c | 8
-rw-r--r--  lib/common/tests/scores/Makefile.am | 9
-rw-r--r--  lib/common/tests/scores/pcmk__add_scores_test.c | 4
-rw-r--r--  lib/common/tests/strings/Makefile.am | 54
-rw-r--r--  lib/common/tests/strings/pcmk__compress_test.c | 2
-rw-r--r--  lib/common/tests/strings/pcmk__guint_from_hash_test.c | 4
-rw-r--r--  lib/common/tests/strings/pcmk__scan_ll_test.c | 64
-rw-r--r--  lib/common/tests/utils/Makefile.am | 22
-rw-r--r--  lib/common/tests/utils/pcmk__fail_attr_name_test.c | 36
-rw-r--r--  lib/common/tests/utils/pcmk__failcount_name_test.c | 35
-rw-r--r--  lib/common/tests/utils/pcmk__lastfailure_name_test.c | 35
-rw-r--r--  lib/common/tests/xml/Makefile.am | 6
-rw-r--r--  lib/common/tests/xml/pcmk__xe_foreach_child_test.c | 13
-rw-r--r--  lib/common/tests/xpath/Makefile.am | 4
-rw-r--r--  lib/common/watchdog.c | 13
-rw-r--r--  lib/common/xml.c | 527
-rw-r--r--  lib/common/xml_attr.c | 84
-rw-r--r--  lib/common/xml_display.c | 18
-rw-r--r--  lib/common/xpath.c | 13
-rw-r--r--  lib/fencing/Makefile.am | 12
-rw-r--r--  lib/fencing/st_client.c | 50
-rw-r--r--  lib/fencing/st_lha.c | 13
-rw-r--r--  lib/fencing/st_rhcs.c | 15
-rw-r--r--  lib/lrmd/Makefile.am | 17
-rw-r--r--  lib/lrmd/lrmd_alerts.c | 4
-rw-r--r--  lib/lrmd/lrmd_client.c | 67
-rw-r--r--  lib/pacemaker/Makefile.am | 20
-rw-r--r--  lib/pacemaker/libpacemaker_private.h | 712
-rw-r--r--  lib/pacemaker/pcmk_acl.c | 142
-rw-r--r--  lib/pacemaker/pcmk_agents.c | 243
-rw-r--r--  lib/pacemaker/pcmk_cluster_queries.c | 23
-rw-r--r--  lib/pacemaker/pcmk_fence.c | 59
-rw-r--r--  lib/pacemaker/pcmk_graph_consumer.c | 52
-rw-r--r--  lib/pacemaker/pcmk_graph_logging.c | 15
-rw-r--r--  lib/pacemaker/pcmk_graph_producer.c | 420
-rw-r--r--  lib/pacemaker/pcmk_injections.c | 60
-rw-r--r--  lib/pacemaker/pcmk_output.c | 512
-rw-r--r--  lib/pacemaker/pcmk_resource.c | 30
-rw-r--r--  lib/pacemaker/pcmk_rule.c | 67
-rw-r--r--  lib/pacemaker/pcmk_sched_actions.c | 860
-rw-r--r--  lib/pacemaker/pcmk_sched_bundle.c | 1422
-rw-r--r--  lib/pacemaker/pcmk_sched_clone.c | 684
-rw-r--r--  lib/pacemaker/pcmk_sched_colocation.c | 1266
-rw-r--r--  lib/pacemaker/pcmk_sched_constraints.c | 199
-rw-r--r--  lib/pacemaker/pcmk_sched_fencing.c | 181
-rw-r--r--  lib/pacemaker/pcmk_sched_group.c | 581
-rw-r--r--  lib/pacemaker/pcmk_sched_instances.c | 738
-rw-r--r--  lib/pacemaker/pcmk_sched_location.c | 216
-rw-r--r--  lib/pacemaker/pcmk_sched_migration.c | 220
-rw-r--r--  lib/pacemaker/pcmk_sched_nodes.c | 221
-rw-r--r--  lib/pacemaker/pcmk_sched_ordering.c | 459
-rw-r--r--  lib/pacemaker/pcmk_sched_primitive.c | 641
-rw-r--r--  lib/pacemaker/pcmk_sched_probes.c | 350
-rw-r--r--  lib/pacemaker/pcmk_sched_promotable.c | 443
-rw-r--r--  lib/pacemaker/pcmk_sched_recurring.c | 240
-rw-r--r--  lib/pacemaker/pcmk_sched_remote.c | 252
-rw-r--r--  lib/pacemaker/pcmk_sched_resource.c | 517
-rw-r--r--  lib/pacemaker/pcmk_sched_tickets.c | 142
-rw-r--r--  lib/pacemaker/pcmk_sched_utilization.c | 102
-rw-r--r--  lib/pacemaker/pcmk_scheduler.c | 421
-rw-r--r--  lib/pacemaker/pcmk_simulate.c | 269
-rw-r--r--  lib/pacemaker/pcmk_status.c | 73
-rw-r--r--  lib/pengine/Makefile.am | 52
-rw-r--r--  lib/pengine/bundle.c | 415
-rw-r--r--  lib/pengine/clone.c | 428
-rw-r--r--  lib/pengine/common.c | 339
-rw-r--r--  lib/pengine/complex.c | 338
-rw-r--r--  lib/pengine/failcounts.c | 247
-rw-r--r--  lib/pengine/group.c | 102
-rw-r--r--  lib/pengine/native.c | 335
-rw-r--r--  lib/pengine/pe_actions.c | 1303
-rw-r--r--  lib/pengine/pe_digest.c | 162
-rw-r--r--  lib/pengine/pe_health.c | 16
-rw-r--r--  lib/pengine/pe_notif.c | 226
-rw-r--r--  lib/pengine/pe_output.c | 552
-rw-r--r--  lib/pengine/pe_status_private.h | 83
-rw-r--r--  lib/pengine/remote.c | 100
-rw-r--r--  lib/pengine/rules.c | 47
-rw-r--r--  lib/pengine/rules_alerts.c | 13
-rw-r--r--  lib/pengine/status.c | 268
-rw-r--r--  lib/pengine/tags.c | 37
-rw-r--r--  lib/pengine/tests/Makefile.am | 15
-rw-r--r--  lib/pengine/tests/native/Makefile.am | 4
-rw-r--r--  lib/pengine/tests/native/native_find_rsc_test.c | 724
-rw-r--r--  lib/pengine/tests/native/pe_base_name_eq_test.c | 31
-rw-r--r--  lib/pengine/tests/status/Makefile.am | 12
-rw-r--r--  lib/pengine/tests/status/pe_find_node_any_test.c | 6
-rw-r--r--  lib/pengine/tests/status/pe_find_node_id_test.c | 6
-rw-r--r--  lib/pengine/tests/status/pe_find_node_test.c | 6
-rw-r--r--  lib/pengine/tests/status/pe_new_working_set_test.c | 10
-rw-r--r--  lib/pengine/tests/status/set_working_set_defaults_test.c | 27
-rw-r--r--  lib/pengine/tests/utils/Makefile.am | 5
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_node_name_test.c | 6
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c | 4
-rw-r--r--  lib/pengine/unpack.c | 1794
-rw-r--r--  lib/pengine/utils.c | 331
-rw-r--r--  lib/pengine/variant.h | 91
-rw-r--r--  lib/services/Makefile.am | 13
-rw-r--r--  lib/services/dbus.c | 2
-rw-r--r--  lib/services/services.c | 8
-rw-r--r--  lib/services/services_linux.c | 22
-rw-r--r--  lib/services/services_lsb.c | 5
-rw-r--r--  lib/services/services_nagios.c | 4
-rw-r--r--  lib/services/systemd.c | 26
-rw-r--r--  lib/services/upstart.c | 21
175 files changed, 15136 insertions, 10892 deletions
diff --git a/lib/Makefile.am b/lib/Makefile.am
index ed5bfa3..52cf974 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2003-2021 the Pacemaker project contributors
+# Copyright 2003-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -8,7 +8,11 @@
#
MAINTAINERCLEANFILES = Makefile.in
-LIBS = cib lrmd service fencing cluster
+LIBS = cib \
+ lrmd \
+ service \
+ fencing \
+ cluster
pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \
libpacemaker.pc \
@@ -18,4 +22,12 @@ pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \
EXTRA_DIST = $(pkgconfig_DATA:%=%.in)
-SUBDIRS = gnu common pengine cib services fencing lrmd cluster pacemaker
+SUBDIRS = gnu \
+ common \
+ pengine \
+ cib \
+ services \
+ fencing \
+ lrmd \
+ cluster \
+ pacemaker
diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am
index 721fca1..a74c4b1 100644
--- a/lib/cib/Makefile.am
+++ b/lib/cib/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2018 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,18 +11,20 @@ include $(top_srcdir)/mk/common.mk
## libraries
lib_LTLIBRARIES = libcib.la
-## SOURCES
-libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c
-libcib_la_SOURCES += cib_file.c cib_remote.c
+## Library sources (*must* use += format for bumplibs)
+libcib_la_SOURCES = cib_attrs.c
+libcib_la_SOURCES += cib_client.c
+libcib_la_SOURCES += cib_file.c
+libcib_la_SOURCES += cib_native.c
+libcib_la_SOURCES += cib_ops.c
+libcib_la_SOURCES += cib_remote.c
+libcib_la_SOURCES += cib_utils.c
-libcib_la_LDFLAGS = -version-info 31:0:4
+libcib_la_LDFLAGS = -version-info 32:0:5
libcib_la_CPPFLAGS = -I$(top_srcdir) $(AM_CPPFLAGS)
libcib_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcib_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/common/libcrmcommon.la
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
+libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \
+ $(top_builddir)/lib/common/libcrmcommon.la
diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c
index 5f3a722..11629b8 100644
--- a/lib/cib/cib_attrs.c
+++ b/lib/cib/cib_attrs.c
@@ -152,16 +152,15 @@ find_attr(cib_t *cib, const char *section, const char *node_uuid,
static int
handle_multiples(pcmk__output_t *out, xmlNode *search, const char *attr_name)
{
- if (xml_has_children(search)) {
+ if ((search != NULL) && (search->children != NULL)) {
xmlNode *child = NULL;
- out->info(out, "Multiple attributes match name=%s", attr_name);
+ out->info(out, "Multiple attributes match name=%s", attr_name);
for (child = pcmk__xml_first_child(search); child != NULL;
child = pcmk__xml_next(child)) {
out->info(out, " Value: %s \t(id=%s)",
crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
}
-
return ENOTUNIQ;
} else {
@@ -184,9 +183,9 @@ cib__update_node_attr(pcmk__output_t *out, cib_t *cib, int call_options, const c
char *local_attr_id = NULL;
char *local_set_name = NULL;
- CRM_CHECK(section != NULL, return EINVAL);
- CRM_CHECK(attr_value != NULL, return EINVAL);
- CRM_CHECK(attr_name != NULL || attr_id != NULL, return EINVAL);
+ CRM_CHECK((out != NULL) && (cib != NULL) && (section != NULL)
+ && ((attr_id != NULL) || (attr_name != NULL))
+ && (attr_value != NULL), return EINVAL);
rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id,
attr_name, user_name, &xml_search);
@@ -360,7 +359,7 @@ cib__get_node_attrs(pcmk__output_t *out, cib_t *cib, const char *section,
crm_trace("Query failed for attribute %s (section=%s node=%s set=%s): %s",
pcmk__s(attr_name, "with unspecified name"),
section, pcmk__s(set_name, "<null>"),
- pcmk__s(node_uuid, "<null>"), pcmk_strerror(rc));
+ pcmk__s(node_uuid, "<null>"), pcmk_rc_str(rc));
}
return rc;
@@ -487,7 +486,7 @@ read_attr_delegate(cib_t *cib, const char *section, const char *node_uuid,
attr_id, attr_name, user_name, &result);
if (rc == pcmk_rc_ok) {
- if (!xml_has_children(result)) {
+ if (result->children == NULL) {
pcmk__str_update(attr_value, crm_element_value(result, XML_NVPAIR_ATTR_VALUE));
} else {
rc = ENOTUNIQ;
@@ -677,9 +676,7 @@ query_node_uname(cib_t * the_cib, const char *uuid, char **uname)
}
xml_obj = fragment;
- CRM_CHECK(pcmk__str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES, pcmk__str_casei),
- return -ENOMSG);
- CRM_ASSERT(xml_obj != NULL);
+ CRM_CHECK(pcmk__xe_is(xml_obj, XML_CIB_TAG_NODES), return -ENOMSG);
crm_log_xml_trace(xml_obj, "Result section");
rc = -ENXIO;
diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c
index 2d179e0..32e1f83 100644
--- a/lib/cib/cib_client.c
+++ b/lib/cib/cib_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -253,14 +253,15 @@ cib_client_noop(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_NOOP, NULL, NULL, NULL, NULL,
- call_options, NULL);
+ call_options, cib->user);
}
static int
cib_client_ping(cib_t * cib, xmlNode ** output_data, int call_options)
{
op_common(cib);
- return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data, call_options, NULL);
+ return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data,
+ call_options, cib->user);
}
static int
@@ -275,7 +276,7 @@ cib_client_query_from(cib_t * cib, const char *host, const char *section,
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_QUERY, host, section, NULL,
- output_data, call_options, NULL);
+ output_data, call_options, cib->user);
}
static int
@@ -283,7 +284,7 @@ is_primary(cib_t *cib)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_IS_PRIMARY, NULL, NULL, NULL,
- NULL, cib_scope_local|cib_sync_call, NULL);
+ NULL, cib_scope_local|cib_sync_call, cib->user);
}
static int
@@ -291,7 +292,7 @@ set_secondary(cib_t *cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_SECONDARY, NULL, NULL, NULL,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -306,7 +307,7 @@ set_primary(cib_t *cib, int call_options)
op_common(cib);
crm_trace("Adding cib_scope_local to options");
return cib_internal_op(cib, PCMK__CIB_REQUEST_PRIMARY, NULL, NULL, NULL,
- NULL, call_options|cib_scope_local, NULL);
+ NULL, call_options|cib_scope_local, cib->user);
}
static int
@@ -314,7 +315,7 @@ cib_client_bump_epoch(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_BUMP, NULL, NULL, NULL, NULL,
- call_options, NULL);
+ call_options, cib->user);
}
static int
@@ -322,7 +323,7 @@ cib_client_upgrade(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_UPGRADE, NULL, NULL, NULL,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -336,7 +337,7 @@ cib_client_sync_from(cib_t * cib, const char *host, const char *section, int cal
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_SYNC_TO_ALL, host, section,
- NULL, NULL, call_options, NULL);
+ NULL, NULL, call_options, cib->user);
}
static int
@@ -344,7 +345,7 @@ cib_client_create(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_CREATE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -352,7 +353,7 @@ cib_client_modify(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_MODIFY, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -360,7 +361,7 @@ cib_client_replace(cib_t * cib, const char *section, xmlNode * data, int call_op
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_REPLACE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -368,7 +369,7 @@ cib_client_delete(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_DELETE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -376,7 +377,7 @@ cib_client_delete_absolute(cib_t * cib, const char *section, xmlNode * data, int
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_ABS_DELETE, NULL, section,
- data, NULL, call_options, NULL);
+ data, NULL, call_options, cib->user);
}
static int
@@ -384,7 +385,76 @@ cib_client_erase(cib_t * cib, xmlNode ** output_data, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_ERASE, NULL, NULL, NULL,
- output_data, call_options, NULL);
+ output_data, call_options, cib->user);
+}
+
+static int
+cib_client_init_transaction(cib_t *cib)
+{
+ int rc = pcmk_rc_ok;
+
+ op_common(cib);
+
+ if (cib->transaction != NULL) {
+ // A client can have at most one transaction at a time
+ rc = pcmk_rc_already;
+ }
+
+ if (rc == pcmk_rc_ok) {
+ cib->transaction = create_xml_node(NULL, T_CIB_TRANSACTION);
+ if (cib->transaction == NULL) {
+ rc = ENOMEM;
+ }
+ }
+
+ if (rc != pcmk_rc_ok) {
+ const char *client_id = NULL;
+
+ cib->cmds->client_id(cib, NULL, &client_id);
+ crm_err("Failed to initialize CIB transaction for client %s: %s",
+ client_id, pcmk_rc_str(rc));
+ }
+ return pcmk_rc2legacy(rc);
+}
+
+static int
+cib_client_end_transaction(cib_t *cib, bool commit, int call_options)
+{
+ const char *client_id = NULL;
+ int rc = pcmk_ok;
+
+ op_common(cib);
+ cib->cmds->client_id(cib, NULL, &client_id);
+ client_id = pcmk__s(client_id, "(unidentified)");
+
+ if (commit) {
+ if (cib->transaction == NULL) {
+ rc = pcmk_rc_no_transaction;
+
+ crm_err("Failed to commit transaction for CIB client %s: %s",
+ client_id, pcmk_rc_str(rc));
+ return pcmk_rc2legacy(rc);
+ }
+ rc = cib_internal_op(cib, PCMK__CIB_REQUEST_COMMIT_TRANSACT, NULL, NULL,
+ cib->transaction, NULL, call_options, cib->user);
+
+ } else {
+ // Discard always succeeds
+ if (cib->transaction != NULL) {
+ crm_trace("Discarded transaction for CIB client %s", client_id);
+ } else {
+ crm_trace("No transaction found for CIB client %s", client_id);
+ }
+ }
+ free_xml(cib->transaction);
+ cib->transaction = NULL;
+ return rc;
+}
+
+static void
+cib_client_set_user(cib_t *cib, const char *user)
+{
+ pcmk__str_update(&(cib->user), user);
}
static void
@@ -622,13 +692,15 @@ cib_new_variant(void)
return NULL;
}
+ // Deprecated method
new_cib->cmds->set_op_callback = cib_client_set_op_callback;
+
new_cib->cmds->add_notify_callback = cib_client_add_notify_callback;
new_cib->cmds->del_notify_callback = cib_client_del_notify_callback;
new_cib->cmds->register_callback = cib_client_register_callback;
new_cib->cmds->register_callback_full = cib_client_register_callback_full;
- new_cib->cmds->noop = cib_client_noop;
+ new_cib->cmds->noop = cib_client_noop; // Deprecated method
new_cib->cmds->ping = cib_client_ping;
new_cib->cmds->query = cib_client_query;
new_cib->cmds->sync = cib_client_sync;
@@ -656,8 +728,14 @@ cib_new_variant(void)
new_cib->cmds->remove = cib_client_delete;
new_cib->cmds->erase = cib_client_erase;
+ // Deprecated method
new_cib->cmds->delete_absolute = cib_client_delete_absolute;
+ new_cib->cmds->init_transaction = cib_client_init_transaction;
+ new_cib->cmds->end_transaction = cib_client_end_transaction;
+
+ new_cib->cmds->set_user = cib_client_set_user;
+
return new_cib;
}
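
For orientation, a rough sketch of how a libcib client might drive the transaction methods registered above (illustrative only, not code from this commit; "example-client" and my_xml are placeholders, error handling omitted):

    #include <crm/cib.h>

    cib_t *cib = cib_new();

    cib->cmds->signon(cib, "example-client", cib_command);
    cib->cmds->init_transaction(cib);

    /* Requests flagged with cib_transaction are queued on cib->transaction
     * instead of being submitted immediately */
    cib->cmds->modify(cib, "crm_config", my_xml, cib_transaction);

    /* commit=true submits a PCMK__CIB_REQUEST_COMMIT_TRANSACT request;
     * commit=false discards the queued requests */
    cib->cmds->end_transaction(cib, true, cib_sync_call);

    cib->cmds->signoff(cib);
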
diff --git a/lib/cib/cib_file.c b/lib/cib/cib_file.c
index 7d05965..a279823 100644
--- a/lib/cib/cib_file.c
+++ b/lib/cib/cib_file.c
@@ -37,35 +37,100 @@
#define CIB_LIVE_NAME CIB_SERIES ".xml"
+// key: client ID (const char *) -> value: client (cib_t *)
+static GHashTable *client_table = NULL;
+
enum cib_file_flags {
cib_file_flag_dirty = (1 << 0),
cib_file_flag_live = (1 << 1),
};
typedef struct cib_file_opaque_s {
- uint32_t flags; // Group of enum cib_file_flags
+ char *id;
char *filename;
+ uint32_t flags; // Group of enum cib_file_flags
+ xmlNode *cib_xml;
} cib_file_opaque_t;
-struct cib_func_entry {
- const char *op;
- gboolean read_only;
- cib_op_t fn;
-};
+static int cib_file_process_commit_transaction(const char *op, int options,
+ const char *section,
+ xmlNode *req, xmlNode *input,
+ xmlNode *existing_cib,
+ xmlNode **result_cib,
+ xmlNode **answer);
-static struct cib_func_entry cib_file_ops[] = {
- { PCMK__CIB_REQUEST_QUERY, TRUE, cib_process_query },
- { PCMK__CIB_REQUEST_MODIFY, FALSE, cib_process_modify },
- { PCMK__CIB_REQUEST_APPLY_PATCH, FALSE, cib_process_diff },
- { PCMK__CIB_REQUEST_BUMP, FALSE, cib_process_bump },
- { PCMK__CIB_REQUEST_REPLACE, FALSE, cib_process_replace },
- { PCMK__CIB_REQUEST_CREATE, FALSE, cib_process_create },
- { PCMK__CIB_REQUEST_DELETE, FALSE, cib_process_delete },
- { PCMK__CIB_REQUEST_ERASE, FALSE, cib_process_erase },
- { PCMK__CIB_REQUEST_UPGRADE, FALSE, cib_process_upgrade },
-};
+/*!
+ * \internal
+ * \brief Add a CIB file client to client table
+ *
+ * \param[in] cib CIB client
+ */
+static void
+register_client(const cib_t *cib)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ if (client_table == NULL) {
+ client_table = pcmk__strkey_table(NULL, NULL);
+ }
+ g_hash_table_insert(client_table, private->id, (gpointer) cib);
+}
+
+/*!
+ * \internal
+ * \brief Remove a CIB file client from client table
+ *
+ * \param[in] cib CIB client
+ */
+static void
+unregister_client(const cib_t *cib)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
-static xmlNode *in_mem_cib = NULL;
+ if (client_table == NULL) {
+ return;
+ }
+
+ g_hash_table_remove(client_table, private->id);
+
+ /* @COMPAT: Add to crm_exit() when libcib and libcrmcommon are merged,
+ * instead of destroying the client table when there are no more clients.
+ */
+ if (g_hash_table_size(client_table) == 0) {
+ g_hash_table_destroy(client_table);
+ client_table = NULL;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Look up a CIB file client by its ID
+ *
+ * \param[in] client_id CIB client ID
+ *
+ * \return CIB client with matching ID if found, or \p NULL otherwise
+ */
+static cib_t *
+get_client(const char *client_id)
+{
+ if (client_table == NULL) {
+ return NULL;
+ }
+ return g_hash_table_lookup(client_table, (gpointer) client_id);
+}
+
+static const cib__op_fn_t cib_op_functions[] = {
+ [cib__op_apply_patch] = cib_process_diff,
+ [cib__op_bump] = cib_process_bump,
+ [cib__op_commit_transact] = cib_file_process_commit_transaction,
+ [cib__op_create] = cib_process_create,
+ [cib__op_delete] = cib_process_delete,
+ [cib__op_erase] = cib_process_erase,
+ [cib__op_modify] = cib_process_modify,
+ [cib__op_query] = cib_process_query,
+ [cib__op_replace] = cib_process_replace,
+ [cib__op_upgrade] = cib_process_upgrade,
+};
/* cib_file_backup() and cib_file_write_with_digest() need to chown the
* written files only in limited circumstances, so these variables allow
@@ -95,6 +160,27 @@ static gboolean cib_do_chown = FALSE;
/*!
* \internal
+ * \brief Get the function that performs a given CIB file operation
+ *
+ * \param[in] operation Operation whose function to look up
+ *
+ * \return Function that performs \p operation for a CIB file client
+ */
+static cib__op_fn_t
+file_get_op_function(const cib__operation_t *operation)
+{
+ enum cib__op_type type = operation->type;
+
+ CRM_ASSERT(type >= 0);
+
+ if (type >= PCMK__NELEM(cib_op_functions)) {
+ return NULL;
+ }
+ return cib_op_functions[type];
+}
+
+/*!
+ * \internal
* \brief Check whether a file is the live CIB
*
* \param[in] filename Name of file to check
@@ -125,114 +211,148 @@ cib_file_is_live(const char *filename)
}
static int
-cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host,
- const char *section, xmlNode *data,
- xmlNode **output_data, int call_options,
- const char *user_name)
+cib_file_process_request(cib_t *cib, xmlNode *request, xmlNode **output)
{
int rc = pcmk_ok;
- char *effective_user = NULL;
- gboolean query = FALSE;
- gboolean changed = FALSE;
- xmlNode *request = NULL;
- xmlNode *output = NULL;
- xmlNode *cib_diff = NULL;
+ const cib__operation_t *operation = NULL;
+ cib__op_fn_t op_function = NULL;
+
+ int call_id = 0;
+ int call_options = cib_none;
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *section = crm_element_value(request, F_CIB_SECTION);
+ xmlNode *data = get_message_xml(request, F_CIB_CALLDATA);
+
+ bool changed = false;
+ bool read_only = false;
xmlNode *result_cib = NULL;
- cib_op_t *fn = NULL;
- int lpc = 0;
- static int max_msg_types = PCMK__NELEM(cib_file_ops);
+ xmlNode *cib_diff = NULL;
+
cib_file_opaque_t *private = cib->variant_opaque;
- crm_info("Handling %s operation for %s as %s",
- (op? op : "invalid"), (section? section : "entire CIB"),
- (user_name? user_name : "default user"));
+ // We error checked these in callers
+ cib__get_operation(op, &operation);
+ op_function = file_get_op_function(operation);
- cib__set_call_options(call_options, "file operation",
- cib_no_mtime|cib_inhibit_bcast|cib_scope_local);
+ crm_element_value_int(request, F_CIB_CALLID, &call_id);
+ crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
- if (cib->state == cib_disconnected) {
- return -ENOTCONN;
- }
+ read_only = !pcmk_is_set(operation->flags, cib__op_attr_modifies);
- if (output_data != NULL) {
- *output_data = NULL;
+ // Mirror the logic in prepare_input() in pacemaker-based
+ if ((section != NULL) && pcmk__xe_is(data, XML_TAG_CIB)) {
+
+ data = pcmk_find_cib_element(data, section);
}
- if (op == NULL) {
- return -EINVAL;
+ rc = cib_perform_op(op, call_options, op_function, read_only, section,
+ request, data, true, &changed, &private->cib_xml,
+ &result_cib, &cib_diff, output);
+
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ /* The rest of the logic applies only to the transaction as a whole, not
+ * to individual requests.
+ */
+ goto done;
}
- for (lpc = 0; lpc < max_msg_types; lpc++) {
- if (pcmk__str_eq(op, cib_file_ops[lpc].op, pcmk__str_casei)) {
- fn = &(cib_file_ops[lpc].fn);
- query = cib_file_ops[lpc].read_only;
- break;
+ if (rc == -pcmk_err_schema_validation) {
+ validate_xml_verbose(result_cib);
+
+ } else if ((rc == pcmk_ok) && !read_only) {
+ pcmk__log_xml_patchset(LOG_DEBUG, cib_diff);
+
+ if (result_cib != private->cib_xml) {
+ free_xml(private->cib_xml);
+ private->cib_xml = result_cib;
}
+ cib_set_file_flags(private, cib_file_flag_dirty);
}
- if (fn == NULL) {
- return -EPROTONOSUPPORT;
+ // Global operation callback (deprecated)
+ if (cib->op_callback != NULL) {
+ cib->op_callback(NULL, call_id, rc, *output);
}
- cib->call_id++;
- request = cib_create_op(cib->call_id, op, host, section, data, call_options,
- user_name);
- if(user_name) {
- crm_xml_add(request, XML_ACL_TAG_USER, user_name);
+done:
+ if ((result_cib != private->cib_xml) && (result_cib != *output)) {
+ free_xml(result_cib);
}
+ free_xml(cib_diff);
+ return rc;
+}
- /* Mirror the logic in cib_prepare_common() */
- if (section != NULL && data != NULL && pcmk__str_eq(crm_element_name(data), XML_TAG_CIB, pcmk__str_none)) {
- data = pcmk_find_cib_element(data, section);
- }
+static int
+cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host,
+ const char *section, xmlNode *data,
+ xmlNode **output_data, int call_options,
+ const char *user_name)
+{
+ int rc = pcmk_ok;
+ xmlNode *request = NULL;
+ xmlNode *output = NULL;
+ cib_file_opaque_t *private = cib->variant_opaque;
- rc = cib_perform_op(op, call_options, fn, query,
- section, request, data, TRUE, &changed, in_mem_cib, &result_cib, &cib_diff,
- &output);
+ const cib__operation_t *operation = NULL;
- free_xml(request);
- if (rc == -pcmk_err_schema_validation) {
- validate_xml_verbose(result_cib);
+ crm_info("Handling %s operation for %s as %s",
+ pcmk__s(op, "invalid"), pcmk__s(section, "entire CIB"),
+ pcmk__s(user_name, "default user"));
+
+ if (output_data != NULL) {
+ *output_data = NULL;
}
- if (rc != pcmk_ok) {
- free_xml(result_cib);
+ if (cib->state == cib_disconnected) {
+ return -ENOTCONN;
+ }
- } else if (query == FALSE) {
- pcmk__output_t *out = NULL;
+ rc = cib__get_operation(op, &operation);
+ rc = pcmk_rc2legacy(rc);
+ if (rc != pcmk_ok) {
+ // @COMPAT: At compatibility break, use rc directly
+ return -EPROTONOSUPPORT;
+ }
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
+ if (file_get_op_function(operation) == NULL) {
+ // @COMPAT: At compatibility break, use EOPNOTSUPP
+ crm_err("Operation %s is not supported by CIB file clients", op);
+ return -EPROTONOSUPPORT;
+ }
- pcmk__output_set_log_level(out, LOG_DEBUG);
- rc = out->message(out, "xml-patchset", cib_diff);
- out->finish(out, pcmk_rc2exitc(rc), true, NULL);
- pcmk__output_free(out);
- rc = pcmk_ok;
+ cib__set_call_options(call_options, "file operation", cib_no_mtime);
- free_xml(in_mem_cib);
- in_mem_cib = result_cib;
- cib_set_file_flags(private, cib_file_flag_dirty);
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &request);
+ if (rc != pcmk_ok) {
+ return rc;
}
+ crm_xml_add(request, XML_ACL_TAG_USER, user_name);
+ crm_xml_add(request, F_CIB_CLIENTID, private->id);
- if (cib->op_callback != NULL) {
- cib->op_callback(NULL, cib->call_id, rc, output);
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, request);
+ goto done;
}
+ rc = cib_file_process_request(cib, request, &output);
+
if ((output_data != NULL) && (output != NULL)) {
- *output_data = (output == in_mem_cib)? copy_xml(output) : output;
+ if (output->doc == private->cib_xml->doc) {
+ *output_data = copy_xml(output);
+ } else {
+ *output_data = output;
+ }
}
done:
- free_xml(cib_diff);
+ if ((output != NULL)
+ && (output->doc != private->cib_xml->doc)
+ && ((output_data == NULL) || (output != *output_data))) {
- if ((output_data == NULL) && (output != in_mem_cib)) {
- /* Don't free output if we're still using it. (output_data != NULL)
- * means we may have assigned *output_data = output above.
- */
free_xml(output);
}
- free(effective_user);
+ free_xml(request);
return rc;
}
@@ -240,7 +360,8 @@ done:
* \internal
* \brief Read CIB from disk and validate it against XML schema
*
- * \param[in] filename Name of file to read CIB from
+ * \param[in] filename Name of file to read CIB from
+ * \param[out] output Where to store the read CIB XML
*
* \return pcmk_ok on success,
* -ENXIO if file does not exist (or stat() otherwise fails), or
@@ -251,7 +372,7 @@ done:
* because some callers might not need to write.
*/
static int
-load_file_cib(const char *filename)
+load_file_cib(const char *filename, xmlNode **output)
{
struct stat buf;
xmlNode *root = NULL;
@@ -282,7 +403,7 @@ load_file_cib(const char *filename)
}
/* Remember the parsed XML for later use */
- in_mem_cib = root;
+ *output = root;
return pcmk_ok;
}
@@ -295,7 +416,7 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
if (private->filename == NULL) {
rc = -EINVAL;
} else {
- rc = load_file_cib(private->filename);
+ rc = load_file_cib(private->filename, &private->cib_xml);
}
if (rc == pcmk_ok) {
@@ -303,10 +424,11 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
private->filename, name);
cib->state = cib_connected_command;
cib->type = cib_command;
+ register_client(cib);
} else {
- crm_info("Connection to local file '%s' for %s failed: %s\n",
- private->filename, name, pcmk_strerror(rc));
+ crm_info("Connection to local file '%s' for %s (client %s) failed: %s",
+ private->filename, name, private->id, pcmk_strerror(rc));
}
return rc;
}
@@ -315,12 +437,13 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
* \internal
* \brief Write out the in-memory CIB to a live CIB file
*
- * param[in,out] path Full path to file to write
+ * param[in] cib_root Root of XML tree to write
+ * param[in,out] path Full path to file to write
*
* \return 0 on success, -1 on failure
*/
static int
-cib_file_write_live(char *path)
+cib_file_write_live(xmlNode *cib_root, char *path)
{
uid_t uid = geteuid();
struct passwd *daemon_pwent;
@@ -370,7 +493,7 @@ cib_file_write_live(char *path)
}
/* write the file */
- if (cib_file_write_with_digest(in_mem_cib, cib_dirname,
+ if (cib_file_write_with_digest(cib_root, cib_dirname,
cib_filename) != pcmk_ok) {
rc = -1;
}
@@ -410,13 +533,15 @@ cib_file_signoff(cib_t *cib)
crm_debug("Disconnecting from the CIB manager");
cib->state = cib_disconnected;
cib->type = cib_no_connection;
+ unregister_client(cib);
+ cib->cmds->end_transaction(cib, false, cib_none);
/* If the in-memory CIB has been changed, write it to disk */
if (pcmk_is_set(private->flags, cib_file_flag_dirty)) {
/* If this is the live CIB, write it out with a digest */
if (pcmk_is_set(private->flags, cib_file_flag_live)) {
- if (cib_file_write_live(private->filename) < 0) {
+ if (cib_file_write_live(private->cib_xml, private->filename) < 0) {
rc = pcmk_err_generic;
}
@@ -424,7 +549,8 @@ cib_file_signoff(cib_t *cib)
} else {
gboolean do_bzip = pcmk__ends_with_ext(private->filename, ".bz2");
- if (write_xml_file(in_mem_cib, private->filename, do_bzip) <= 0) {
+ if (write_xml_file(private->cib_xml, private->filename,
+ do_bzip) <= 0) {
rc = pcmk_err_generic;
}
}
@@ -438,8 +564,8 @@ cib_file_signoff(cib_t *cib)
}
/* Free the in-memory CIB */
- free_xml(in_mem_cib);
- in_mem_cib = NULL;
+ free_xml(private->cib_xml);
+ private->cib_xml = NULL;
return rc;
}
@@ -455,9 +581,11 @@ cib_file_free(cib_t *cib)
if (rc == pcmk_ok) {
cib_file_opaque_t *private = cib->variant_opaque;
+ free(private->id);
free(private->filename);
- free(cib->cmds);
free(private);
+ free(cib->cmds);
+ free(cib->user);
free(cib);
} else {
@@ -494,24 +622,24 @@ cib_file_set_connection_dnotify(cib_t *cib,
* \param[out] async_id If not \p NULL, where to store asynchronous client ID
* \param[out] sync_id If not \p NULL, where to store synchronous client ID
*
- * \return Legacy Pacemaker return code (specifically, \p -EPROTONOSUPPORT)
+ * \return Legacy Pacemaker return code
*
* \note This is the \p cib_file variant implementation of
* \p cib_api_operations_t:client_id().
- * \note A \p cib_file object doesn't connect to the CIB and is never assigned a
- * client ID.
*/
static int
cib_file_client_id(const cib_t *cib, const char **async_id,
const char **sync_id)
{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
if (async_id != NULL) {
- *async_id = NULL;
+ *async_id = private->id;
}
if (sync_id != NULL) {
- *sync_id = NULL;
+ *sync_id = private->id;
}
- return -EPROTONOSUPPORT;
+ return pcmk_ok;
}
cib_t *
@@ -530,6 +658,7 @@ cib_file_new(const char *cib_location)
free(cib);
return NULL;
}
+ private->id = crm_generate_uuid();
cib->variant = cib_file;
cib->variant_opaque = private;
@@ -550,7 +679,7 @@ cib_file_new(const char *cib_location)
cib->cmds->signon = cib_file_signon;
cib->cmds->signoff = cib_file_signoff;
cib->cmds->free = cib_file_free;
- cib->cmds->inputfd = cib_file_inputfd;
+ cib->cmds->inputfd = cib_file_inputfd; // Deprecated method
cib->cmds->register_notification = cib_file_register_notification;
cib->cmds->set_connection_dnotify = cib_file_set_connection_dnotify;
@@ -917,3 +1046,133 @@ cib_file_write_with_digest(xmlNode *cib_root, const char *cib_dirname,
free(tmp_cib);
return exit_rc;
}
+
+/*!
+ * \internal
+ * \brief Process requests in a CIB transaction
+ *
+ * Stop when a request fails or when all requests have been processed.
+ *
+ * \param[in,out] cib CIB client
+ * \param[in,out] transaction CIB transaction
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+cib_file_process_transaction_requests(cib_t *cib, xmlNode *transaction)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ for (xmlNode *request = first_named_child(transaction, T_CIB_COMMAND);
+ request != NULL; request = crm_next_same_xml(request)) {
+
+ xmlNode *output = NULL;
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+
+ int rc = cib_file_process_request(cib, request, &output);
+
+ rc = pcmk_legacy2rc(rc);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Aborting transaction for CIB file client (%s) on file "
+ "'%s' due to failed %s request: %s",
+ private->id, private->filename, op, pcmk_rc_str(rc));
+ crm_log_xml_info(request, "Failed request");
+ return rc;
+ }
+
+ crm_trace("Applied %s request to transaction working CIB for CIB file "
+ "client (%s) on file '%s'",
+ op, private->id, private->filename);
+ crm_log_xml_trace(request, "Successful request");
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Commit a given CIB file client's transaction to a working CIB copy
+ *
+ * \param[in,out] cib CIB file client
+ * \param[in] transaction CIB transaction
+ * \param[in,out] result_cib Where to store result CIB
+ *
+ * \return Standard Pacemaker return code
+ *
+ * \note The caller is responsible for replacing the \p cib argument's
+ * \p private->cib_xml with \p result_cib on success, and for freeing
+ * \p result_cib using \p free_xml() on failure.
+ */
+static int
+cib_file_commit_transaction(cib_t *cib, xmlNode *transaction,
+ xmlNode **result_cib)
+{
+ int rc = pcmk_rc_ok;
+ cib_file_opaque_t *private = cib->variant_opaque;
+ xmlNode *saved_cib = private->cib_xml;
+
+ CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION),
+ return pcmk_rc_no_transaction);
+
+ /* *result_cib should be a copy of private->cib_xml (created by
+ * cib_perform_op()). If not, make a copy now. Change tracking isn't
+ * strictly required here because:
+ * * Each request in the transaction will have changes tracked and ACLs
+ * checked if appropriate.
+ * * cib_perform_op() will infer changes for the commit request at the end.
+ */
+ CRM_CHECK((*result_cib != NULL) && (*result_cib != private->cib_xml),
+ *result_cib = copy_xml(private->cib_xml));
+
+ crm_trace("Committing transaction for CIB file client (%s) on file '%s' to "
+ "working CIB",
+ private->id, private->filename);
+
+ // Apply all changes to a working copy of the CIB
+ private->cib_xml = *result_cib;
+
+ rc = cib_file_process_transaction_requests(cib, transaction);
+
+ crm_trace("Transaction commit %s for CIB file client (%s) on file '%s'",
+ ((rc == pcmk_rc_ok)? "succeeded" : "failed"),
+ private->id, private->filename);
+
+ /* Some request types (for example, erase) may have freed private->cib_xml
+ * (the working copy) and pointed it at a new XML object. In that case, it
+ * follows that *result_cib (the working copy) was freed.
+ *
+ * Point *result_cib at the updated working copy stored in private->cib_xml.
+ */
+ *result_cib = private->cib_xml;
+
+ // Point private->cib_xml back to the unchanged original copy
+ private->cib_xml = saved_cib;
+
+ return rc;
+}
+
+static int
+cib_file_process_commit_transaction(const char *op, int options,
+ const char *section, xmlNode *req,
+ xmlNode *input, xmlNode *existing_cib,
+ xmlNode **result_cib, xmlNode **answer)
+{
+ int rc = pcmk_rc_ok;
+ const char *client_id = crm_element_value(req, F_CIB_CLIENTID);
+ cib_t *cib = NULL;
+
+ CRM_CHECK(client_id != NULL, return -EINVAL);
+
+ cib = get_client(client_id);
+ CRM_CHECK(cib != NULL, return -EINVAL);
+
+ rc = cib_file_commit_transaction(cib, input, result_cib);
+ if (rc != pcmk_rc_ok) {
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ crm_err("Could not commit transaction for CIB file client (%s) on "
+ "file '%s': %s",
+ private->id, private->filename, pcmk_rc_str(rc));
+ }
+ return pcmk_rc2legacy(rc);
+}
diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c
index 4a87f56..c5e8b9e 100644
--- a/lib/cib/cib_native.c
+++ b/lib/cib/cib_native.c
@@ -69,20 +69,19 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host,
pcmk__set_ipc_flags(ipc_flags, "client", crm_ipc_client_response);
}
- cib->call_id++;
- if (cib->call_id < 1) {
- cib->call_id = 1;
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &op_msg);
+ if (rc != pcmk_ok) {
+ return rc;
}
- op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options,
- user_name);
- if (op_msg == NULL) {
- return -EPROTO;
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, op_msg);
+ goto done;
}
crm_trace("Sending %s message to the CIB manager (timeout=%ds)", op, cib->call_timeout);
rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply);
- free_xml(op_msg);
if (rc < 0) {
crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op,
@@ -168,6 +167,7 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host,
cib->state = cib_disconnected;
}
+ free_xml(op_msg);
free_xml(op_reply);
return rc;
}
@@ -255,6 +255,7 @@ cib_native_signoff(cib_t *cib)
crm_ipc_destroy(ipc);
}
+ cib->cmds->end_transaction(cib, false, cib_none);
cib->state = cib_disconnected;
cib->type = cib_no_connection;
@@ -268,6 +269,7 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
int rc = pcmk_ok;
const char *channel = NULL;
cib_native_opaque_t *native = cib->variant_opaque;
+ xmlNode *hello = NULL;
struct ipc_client_callbacks cib_callbacks = {
.dispatch = cib_native_dispatch_internal,
@@ -296,12 +298,16 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
if (async_fd != NULL) {
native->ipc = crm_ipc_new(channel, 0);
-
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *async_fd = crm_ipc_get_fd(native->ipc);
-
- } else if (native->ipc) {
- rc = -ENOTCONN;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, async_fd);
+ if (rc != pcmk_rc_ok) {
+ crm_info("Couldn't get file descriptor for %s IPC",
+ channel);
+ }
+ }
+ rc = pcmk_rc2legacy(rc);
}
} else {
@@ -317,23 +323,23 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
}
if (rc == pcmk_ok) {
- xmlNode *reply = NULL;
- xmlNode *hello = create_xml_node(NULL, "cib_command");
+ rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL,
+ cib_sync_call, NULL, name, &hello);
+ }
- crm_xml_add(hello, F_TYPE, T_CIB);
- crm_xml_add(hello, F_CIB_OPERATION, CRM_OP_REGISTER);
- crm_xml_add(hello, F_CIB_CLIENTNAME, name);
- crm_xml_add_int(hello, F_CIB_CALLOPTS, cib_sync_call);
+ if (rc == pcmk_ok) {
+ xmlNode *reply = NULL;
- if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) {
+ if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1,
+ &reply) > 0) {
const char *msg_type = crm_element_value(reply, F_CIB_OPERATION);
- rc = pcmk_ok;
crm_log_xml_trace(reply, "reg-reply");
if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) {
- crm_info("Reply to CIB registration message has "
- "unknown type '%s'", msg_type);
+ crm_info("Reply to CIB registration message has unknown type "
+ "'%s'",
+ msg_type);
rc = -EPROTO;
} else {
@@ -347,7 +353,6 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
} else {
rc = -ECOMM;
}
-
free_xml(hello);
}
@@ -383,6 +388,7 @@ cib_native_free(cib_t *cib)
free(native->token);
free(cib->variant_opaque);
free(cib->cmds);
+ free(cib->user);
free(cib);
}
diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c
index d3293c4..c324304 100644
--- a/lib/cib/cib_ops.c
+++ b/lib/cib/cib_ops.c
@@ -19,6 +19,9 @@
#include <sys/param.h>
#include <sys/types.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
@@ -26,6 +29,139 @@
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
+// @TODO: Free this via crm_exit() when libcib gets merged with libcrmcommon
+static GHashTable *operation_table = NULL;
+
+static const cib__operation_t cib_ops[] = {
+ {
+ PCMK__CIB_REQUEST_ABS_DELETE, cib__op_abs_delete,
+ cib__op_attr_modifies|cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_APPLY_PATCH, cib__op_apply_patch,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_BUMP, cib__op_bump,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_COMMIT_TRANSACT, cib__op_commit_transact,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_writes_through
+ },
+ {
+ PCMK__CIB_REQUEST_CREATE, cib__op_create,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_DELETE, cib__op_delete,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_ERASE, cib__op_erase,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_IS_PRIMARY, cib__op_is_primary,
+ cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_MODIFY, cib__op_modify,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_NOOP, cib__op_noop, cib__op_attr_none
+ },
+ {
+ CRM_OP_PING, cib__op_ping, cib__op_attr_none
+ },
+ {
+ // @COMPAT: Drop cib__op_attr_modifies when we drop legacy mode support
+ PCMK__CIB_REQUEST_PRIMARY, cib__op_primary,
+ cib__op_attr_modifies|cib__op_attr_privileged|cib__op_attr_local
+ },
+ {
+ PCMK__CIB_REQUEST_QUERY, cib__op_query, cib__op_attr_none
+ },
+ {
+ PCMK__CIB_REQUEST_REPLACE, cib__op_replace,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_writes_through
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_SECONDARY, cib__op_secondary,
+ cib__op_attr_privileged|cib__op_attr_local
+ },
+ {
+ PCMK__CIB_REQUEST_SHUTDOWN, cib__op_shutdown, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_SYNC_TO_ALL, cib__op_sync_all, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_SYNC_TO_ONE, cib__op_sync_one, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_UPGRADE, cib__op_upgrade,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_writes_through
+ |cib__op_attr_transaction
+ },
+};
+
+/*!
+ * \internal
+ * \brief Get the \c cib__operation_t object for a given CIB operation name
+ *
+ * \param[in] op CIB operation name
+ * \param[out] operation Where to store CIB operation object
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+cib__get_operation(const char *op, const cib__operation_t **operation)
+{
+ CRM_ASSERT((op != NULL) && (operation != NULL));
+
+ if (operation_table == NULL) {
+ operation_table = pcmk__strkey_table(NULL, NULL);
+
+ for (int lpc = 0; lpc < PCMK__NELEM(cib_ops); lpc++) {
+ const cib__operation_t *oper = &(cib_ops[lpc]);
+
+ g_hash_table_insert(operation_table, (gpointer) oper->name,
+ (gpointer) oper);
+ }
+ }
+
+ *operation = g_hash_table_lookup(operation_table, op);
+ if (*operation == NULL) {
+ crm_err("Operation %s is invalid", op);
+ return EINVAL;
+ }
+ return pcmk_rc_ok;
+}
+
int
cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input,
xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer)
@@ -54,8 +190,8 @@ cib_process_query(const char *op, int options, const char *section, xmlNode * re
result = -ENXIO;
} else if (options & cib_no_children) {
- const char *tag = TYPE(obj_root);
- xmlNode *shallow = create_xml_node(*answer, tag);
+ xmlNode *shallow = create_xml_node(*answer,
+ (const char *) obj_root->name);
copy_in_properties(shallow, obj_root);
*answer = shallow;
@@ -107,12 +243,14 @@ cib_process_erase(const char *op, int options, const char *section, xmlNode * re
int result = pcmk_ok;
crm_trace("Processing \"%s\" event", op);
- *answer = NULL;
- free_xml(*result_cib);
- *result_cib = createEmptyCib(0);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
+ *result_cib = createEmptyCib(0);
copy_in_properties(*result_cib, existing_cib);
update_counter(*result_cib, XML_ATTR_GENERATION_ADMIN, false);
+ *answer = NULL;
return result;
}
@@ -172,7 +310,6 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
xmlNode ** answer)
{
- const char *tag = NULL;
int result = pcmk_ok;
crm_trace("Processing %s for %s section",
@@ -189,16 +326,14 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
return -EINVAL;
}
- tag = crm_element_name(input);
-
if (pcmk__str_eq(XML_CIB_TAG_SECTION_ALL, section, pcmk__str_casei)) {
section = NULL;
- } else if (pcmk__str_eq(tag, section, pcmk__str_casei)) {
+ } else if (pcmk__xe_is(input, section)) {
section = NULL;
}
- if (pcmk__str_eq(tag, XML_TAG_CIB, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, XML_TAG_CIB)) {
int updates = 0;
int epoch = 0;
int admin_epoch = 0;
@@ -262,7 +397,9 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
replace_admin_epoch, replace_epoch, replace_updates, peer);
}
- free_xml(*result_cib);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
*result_cib = copy_xml(input);
} else {
@@ -299,7 +436,7 @@ cib_process_delete(const char *op, int options, const char *section, xmlNode * r
}
obj_root = pcmk_find_cib_element(*result_cib, section);
- if(pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, section)) {
xmlNode *child = NULL;
for (child = pcmk__xml_first_child(input); child;
child = pcmk__xml_next(child)) {
@@ -360,7 +497,8 @@ cib_process_modify(const char *op, int options, const char *section, xmlNode * r
}
}
- if(options & cib_mixed_update) {
+ // @COMPAT cib_mixed_update is deprecated as of 2.1.7
+ if (pcmk_is_set(options, cib_mixed_update)) {
int max = 0, lpc;
xmlXPathObjectPtr xpathObj = xpath_search(*result_cib, "//@__delete__");
@@ -396,7 +534,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
CRM_CHECK(update != NULL, return -EINVAL);
CRM_CHECK(parent != NULL, return -EINVAL);
- object_name = crm_element_name(update);
+ object_name = (const char *) update->name;
CRM_CHECK(object_name != NULL, return -EINVAL);
object_id = ID(update);
@@ -425,33 +563,25 @@ update_cib_object(xmlNode * parent, xmlNode * update)
// @COMPAT: XML_CIB_ATTR_REPLACE is unused internally. Remove at break.
replace = crm_element_value(update, XML_CIB_ATTR_REPLACE);
if (replace != NULL) {
- xmlNode *remove = NULL;
- int last = 0, lpc = 0, len = 0;
+ int last = 0;
+ int len = strlen(replace);
- len = strlen(replace);
- while (lpc <= len) {
+ for (int lpc = 0; lpc <= len; ++lpc) {
if (replace[lpc] == ',' || replace[lpc] == 0) {
- char *replace_item = NULL;
-
- if (last == lpc) {
- /* nothing to do */
- last = lpc + 1;
- goto incr;
- }
-
- replace_item = strndup(replace + last, lpc - last);
- remove = find_xml_node(target, replace_item, FALSE);
- if (remove != NULL) {
- crm_trace("Replacing node <%s> in <%s>",
- replace_item, crm_element_name(target));
- free_xml(remove);
- remove = NULL;
+ if (last != lpc) {
+ char *replace_item = strndup(replace + last, lpc - last);
+ xmlNode *remove = find_xml_node(target, replace_item,
+ FALSE);
+
+ if (remove != NULL) {
+ crm_trace("Replacing node <%s> in <%s>",
+ replace_item, target->name);
+ free_xml(remove);
+ }
+ free(replace_item);
}
- free(replace_item);
last = lpc + 1;
}
- incr:
- lpc++;
}
xml_remove_prop(update, XML_CIB_ATTR_REPLACE);
xml_remove_prop(target, XML_CIB_ATTR_REPLACE);
@@ -475,7 +605,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
a_child = pcmk__xml_next(a_child)) {
int tmp_result = 0;
- crm_trace("Updating child <%s%s%s%s>", crm_element_name(a_child),
+ crm_trace("Updating child <%s%s%s%s>", a_child->name,
((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"),
pcmk__s(ID(a_child), ""), ((ID(a_child) == NULL)? "" : "'"));
@@ -484,7 +614,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
/* only the first error is likely to be interesting */
if (tmp_result != pcmk_ok) {
crm_err("Error updating child <%s%s%s%s>",
- crm_element_name(a_child),
+ a_child->name,
((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"),
pcmk__s(ID(a_child), ""),
((ID(a_child) == NULL)? "" : "'"));
@@ -514,7 +644,7 @@ add_cib_object(xmlNode * parent, xmlNode * new_obj)
return -EINVAL;
}
- object_name = crm_element_name(new_obj);
+ object_name = (const char *) new_obj->name;
if (object_name == NULL) {
return -EINVAL;
}
@@ -555,7 +685,8 @@ update_results(xmlNode *failed, xmlNode *target, const char *operation,
add_node_copy(xml_node, target);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_ID, ID(target));
- crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE, TYPE(target));
+ crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE,
+ (const char *) target->name);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_OP, operation);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_REASON, error_msg);
@@ -582,7 +713,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
} else if (pcmk__str_eq(XML_TAG_CIB, section, pcmk__str_casei)) {
section = NULL;
- } else if (pcmk__str_eq(crm_element_name(input), XML_TAG_CIB, pcmk__str_casei)) {
+ } else if (pcmk__xe_is(input, XML_TAG_CIB)) {
section = NULL;
}
@@ -601,7 +732,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
failed = create_xml_node(NULL, XML_TAG_FAILED);
update_section = pcmk_find_cib_element(*result_cib, section);
- if (pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, section)) {
xmlNode *a_child = NULL;
for (a_child = pcmk__xml_first_child(input); a_child != NULL;
@@ -617,7 +748,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
update_results(failed, input, op, result);
}
- if ((result == pcmk_ok) && xml_has_children(failed)) {
+ if ((result == pcmk_ok) && (failed->children != NULL)) {
result = -EINVAL;
}
@@ -646,8 +777,11 @@ cib_process_diff(const char *op, int options, const char *section, xmlNode * req
op, originator,
(pcmk_is_set(options, cib_force_diff)? " (global update)" : ""));
- free_xml(*result_cib);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
*result_cib = copy_xml(existing_cib);
+
return xml_apply_patchset(*result_cib, input, TRUE);
}
@@ -670,7 +804,7 @@ cib__config_changed_v1(xmlNode *last, xmlNode *next, xmlNode **diff)
goto done;
}
- crm_element_value_int(*diff, "format", &format);
+ crm_element_value_int(*diff, PCMK_XA_FORMAT, &format);
CRM_LOG_ASSERT(format == 1);
xpathObj = xpath_search(*diff, "//" XML_CIB_TAG_CONFIGURATION);
@@ -803,8 +937,8 @@ cib_process_xpath(const char *op, int options, const char *section,
} else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) {
if (options & cib_no_children) {
- const char *tag = TYPE(match);
- xmlNode *shallow = create_xml_node(*answer, tag);
+ xmlNode *shallow = create_xml_node(*answer,
+ (const char *) match->name);
copy_in_properties(shallow, match);
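
A hedged sketch of how the operation table added in cib_ops.c above might be consulted (internal libcib API with names as in this patch; not code from the commit):

    const cib__operation_t *operation = NULL;

    if (cib__get_operation(PCMK__CIB_REQUEST_MODIFY, &operation) == pcmk_rc_ok) {
        /* The flags describe the operation, e.g. whether it changes the CIB
         * and whether it may be used inside a transaction */
        bool read_only = !pcmk_is_set(operation->flags, cib__op_attr_modifies);
        bool in_transaction = pcmk_is_set(operation->flags,
                                          cib__op_attr_transaction);

        crm_trace("%s: read_only=%d transaction=%d",
                  operation->name, read_only, in_transaction);
    }
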
diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c
index 28095b3..77479d7 100644
--- a/lib/cib/cib_remote.c
+++ b/lib/cib/cib_remote.c
@@ -55,7 +55,8 @@ typedef struct cib_remote_opaque_s {
static int
cib_remote_perform_op(cib_t *cib, const char *op, const char *host,
const char *section, xmlNode *data,
- xmlNode **output_data, int call_options, const char *name)
+ xmlNode **output_data, int call_options,
+ const char *user_name)
{
int rc;
int remaining_time = 0;
@@ -79,15 +80,16 @@ cib_remote_perform_op(cib_t *cib, const char *op, const char *host,
return -EINVAL;
}
- cib->call_id++;
- if (cib->call_id < 1) {
- cib->call_id = 1;
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &op_msg);
+ if (rc != pcmk_ok) {
+ return rc;
}
- op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options,
- NULL);
- if (op_msg == NULL) {
- return -EPROTO;
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, op_msg);
+ free_xml(op_msg);
+ return rc;
}
crm_trace("Sending %s message to the CIB manager", op);
@@ -378,7 +380,7 @@ cib_tls_signon(cib_t *cib, pcmk__remote_t *connection, gboolean event_channel)
}
/* login to server */
- login = create_xml_node(NULL, "cib_command");
+ login = create_xml_node(NULL, T_CIB_COMMAND);
crm_xml_add(login, "op", "authenticate");
crm_xml_add(login, "user", private->user);
crm_xml_add(login, "password", private->passwd);
@@ -434,6 +436,7 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type)
{
int rc = pcmk_ok;
cib_remote_opaque_t *private = cib->variant_opaque;
+ xmlNode *hello = NULL;
if (private->passwd == NULL) {
if (private->out == NULL) {
@@ -459,10 +462,13 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type)
}
if (rc == pcmk_ok) {
- xmlNode *hello = cib_create_op(0, CRM_OP_REGISTER, NULL, NULL, NULL, 0,
- NULL);
- crm_xml_add(hello, F_CIB_CLIENTNAME, name);
- pcmk__remote_send_xml(&private->command, hello);
+ rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL, cib_none,
+ NULL, name, &hello);
+ }
+
+ if (rc == pcmk_ok) {
+ rc = pcmk__remote_send_xml(&private->command, hello);
+ rc = pcmk_rc2legacy(rc);
free_xml(hello);
}
@@ -490,6 +496,7 @@ cib_remote_signoff(cib_t *cib)
cib_tls_close(cib);
#endif
+ cib->cmds->end_transaction(cib, false, cib_none);
cib->state = cib_disconnected;
cib->type = cib_no_connection;
@@ -511,6 +518,7 @@ cib_remote_free(cib_t *cib)
free(private->user);
free(private->passwd);
free(cib->cmds);
+ free(cib->user);
free(private);
free(cib);
}
@@ -530,7 +538,7 @@ cib_remote_inputfd(cib_t * cib)
static int
cib_remote_register_notification(cib_t * cib, const char *callback, int enabled)
{
- xmlNode *notify_msg = create_xml_node(NULL, "cib_command");
+ xmlNode *notify_msg = create_xml_node(NULL, T_CIB_COMMAND);
cib_remote_opaque_t *private = cib->variant_opaque;
crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY);
@@ -614,7 +622,7 @@ cib_remote_new(const char *server, const char *user, const char *passwd, int por
cib->cmds->signon = cib_remote_signon;
cib->cmds->signoff = cib_remote_signoff;
cib->cmds->free = cib_remote_free;
- cib->cmds->inputfd = cib_remote_inputfd;
+ cib->cmds->inputfd = cib_remote_inputfd; // Deprecated method
cib->cmds->register_notification = cib_remote_register_notification;
cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify;
diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c
index c75d844..0082eef 100644
--- a/lib/cib/cib_utils.c
+++ b/lib/cib/cib_utils.c
@@ -20,6 +20,7 @@
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
+#include <crm/common/cib_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/pengine/rules.h>
@@ -78,6 +79,154 @@ cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *upda
}
/*!
+ * \internal
+ * \brief Get the XML patchset from a CIB diff notification
+ *
+ * \param[in] msg CIB diff notification
+ * \param[out] patchset Where to store XML patchset
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset)
+{
+ int rc = pcmk_err_generic;
+
+ CRM_ASSERT(patchset != NULL);
+ *patchset = NULL;
+
+ if (msg == NULL) {
+ crm_err("CIB diff notification received with no XML");
+ return ENOMSG;
+ }
+
+ if ((crm_element_value_int(msg, F_CIB_RC, &rc) != 0) || (rc != pcmk_ok)) {
+ crm_warn("Ignore failed CIB update: %s " CRM_XS " rc=%d",
+ pcmk_strerror(rc), rc);
+ crm_log_xml_debug(msg, "failed");
+ return pcmk_legacy2rc(rc);
+ }
+
+ *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+
+ if (*patchset == NULL) {
+ crm_err("CIB diff notification received with no patchset");
+ return ENOMSG;
+ }
+ return pcmk_rc_ok;
+}
+
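/* An illustrative sketch of how a notification handler might consume
 * cib__get_notify_patchset(). The registration via
 * cib->cmds->add_notify_callback() and the T_CIB_DIFF_NOTIFY event name are
 * assumed libcib conventions rather than anything introduced here.
 */
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>

static void
example_diff_callback(const char *event, xmlNode *msg)
{
    const xmlNode *patchset = NULL;

    if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
        return; // Failed update or malformed notification; nothing to process
    }
    crm_debug("Received CIB patchset for %s event", event);
}

/* Assumed registration:
 *   cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY,
 *                                  example_diff_callback);
 */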
+#define XPATH_DIFF_V1 "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset (v1)
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB)
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+static bool
+element_in_patchset_v1(const xmlNode *patchset, const char *element)
+{
+ char *xpath = crm_strdup_printf(XPATH_DIFF_V1 "//%s",
+ pcmk__s(element, XML_TAG_CIB));
+ xmlXPathObject *xpath_obj = xpath_search(patchset, xpath);
+
+ free(xpath);
+
+ if (xpath_obj == NULL) {
+ return false;
+ }
+ freeXpathObject(xpath_obj);
+ return true;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset (v2)
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB). Supported values include any CIB
+ * element supported by \c pcmk__cib_abs_xpath_for().
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+static bool
+element_in_patchset_v2(const xmlNode *patchset, const char *element)
+{
+ const char *element_xpath = pcmk__cib_abs_xpath_for(element);
+ const char *parent_xpath = pcmk_cib_parent_name_for(element);
+ char *element_regex = NULL;
+ bool rc = false;
+
+ CRM_CHECK(element_xpath != NULL, return false); // Unsupported element
+
+ // Matches if and only if element_xpath is part of a changed path
+ element_regex = crm_strdup_printf("^%s(/|$)", element_xpath);
+
+ for (const xmlNode *change = first_named_child(patchset, XML_DIFF_CHANGE);
+ change != NULL; change = crm_next_same_xml(change)) {
+
+ const char *op = crm_element_value(change, F_CIB_OPERATION);
+ const char *diff_xpath = crm_element_value(change, XML_DIFF_PATH);
+
+ if (pcmk__str_eq(diff_xpath, element_regex, pcmk__str_regex)) {
+ // Change to an existing element
+ rc = true;
+ break;
+ }
+
+ if (pcmk__str_eq(op, "create", pcmk__str_none)
+ && pcmk__str_eq(diff_xpath, parent_xpath, pcmk__str_none)
+ && pcmk__xe_is(pcmk__xml_first_child(change), element)) {
+
+ // Newly added element
+ rc = true;
+ break;
+ }
+ }
+
+ free(element_regex);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB). Supported values include any CIB
+ * element supported by \c pcmk__cib_abs_xpath_for().
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+bool
+cib__element_in_patchset(const xmlNode *patchset, const char *element)
+{
+ int format = 1;
+
+ CRM_ASSERT(patchset != NULL);
+
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
+ switch (format) {
+ case 1:
+ return element_in_patchset_v1(patchset, element);
+
+ case 2:
+ return element_in_patchset_v2(patchset, element);
+
+ default:
+ crm_warn("Unknown patch format: %d", format);
+ return false;
+ }
+}
+
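/* An illustrative follow-up to the sketch above: once a patchset has been
 * extracted from a notification, cib__element_in_patchset() answers "did the
 * section I care about change?" for both v1 and v2 patch formats. This is the
 * kind of check the removed pcmk__alert_in_patchset() (see the alerts.c hunk
 * later in this patch) used to hand-roll.
 */
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/cib/internal.h>

static void
example_check_sections(const xmlNode *patchset)
{
    // NULL is equivalent to XML_TAG_CIB, i.e. "did anything change at all?"
    if (cib__element_in_patchset(patchset, NULL)) {
        crm_trace("CIB changed");
    }
    if (cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) {
        crm_trace("Status section changed");
    }
    if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) {
        crm_trace("Alert configuration changed");
    }
}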
+/*!
* \brief Create XML for a new (empty) CIB
*
* \param[in] cib_epoch What to use as "epoch" CIB property
@@ -141,30 +290,79 @@ cib_acl_enabled(xmlNode *xml, const char *user)
return rc;
}
+/*!
+ * \internal
+ * \brief Determine whether to perform operations on a scratch copy of the CIB
+ *
+ * \param[in] op CIB operation
+ * \param[in] section CIB section
+ * \param[in] call_options CIB call options
+ *
+ * \return \p true if we should make a copy of the CIB, or \p false otherwise
+ */
+static bool
+should_copy_cib(const char *op, const char *section, int call_options)
+{
+ if (pcmk_is_set(call_options, cib_dryrun)) {
+ // cib_dryrun implies a scratch copy by definition; no side effects
+ return true;
+ }
+
+ if (pcmk__str_eq(op, PCMK__CIB_REQUEST_COMMIT_TRANSACT, pcmk__str_none)) {
+ /* Commit-transaction must make a copy for atomicity. We must revert to
+ * the original CIB if the entire transaction cannot be applied
+ * successfully.
+ */
+ return true;
+ }
+
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ /* If cib_transaction is set, then we're in the process of committing a
+ * transaction. The commit-transaction request already made a scratch
+ * copy, and we're accumulating changes in that copy.
+ */
+ return false;
+ }
+
+ if (pcmk__str_eq(section, XML_CIB_TAG_STATUS, pcmk__str_none)) {
+ /* Copying large CIBs accounts for a huge percentage of our CIB usage,
+ * and this avoids some of it.
+ *
+ * @TODO: Is this safe? See discussion at
+ * https://github.com/ClusterLabs/pacemaker/pull/3094#discussion_r1211400690.
+ */
+ return false;
+ }
+
+ // Default behavior is to operate on a scratch copy
+ return true;
+}
+
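/* The decision above reduces to a small set of outcomes; a quick illustration
 * using the names referenced in this file (first match wins, top to bottom):
 *
 *   call_options has cib_dryrun set          -> copy (true)
 *   op is PCMK__CIB_REQUEST_COMMIT_TRANSACT  -> copy (true)
 *   call_options has cib_transaction set     -> no copy (false)
 *   section is XML_CIB_TAG_STATUS            -> no copy (false)
 *   anything else                            -> copy (true)
 */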
int
-cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query,
- const char *section, xmlNode * req, xmlNode * input,
- gboolean manage_counters, gboolean * config_changed,
- xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff, xmlNode ** output)
+cib_perform_op(const char *op, int call_options, cib__op_fn_t fn, bool is_query,
+ const char *section, xmlNode *req, xmlNode *input,
+ bool manage_counters, bool *config_changed,
+ xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff,
+ xmlNode **output)
{
int rc = pcmk_ok;
- gboolean check_schema = TRUE;
+ bool check_schema = true;
+ bool make_copy = true;
xmlNode *top = NULL;
xmlNode *scratch = NULL;
+ xmlNode *patchset_cib = NULL;
xmlNode *local_diff = NULL;
const char *new_version = NULL;
const char *user = crm_element_value(req, F_CIB_USER);
- bool with_digest = FALSE;
-
- pcmk__output_t *out = NULL;
- int out_rc = pcmk_rc_no_output;
+ bool with_digest = false;
crm_trace("Begin %s%s%s op",
(pcmk_is_set(call_options, cib_dryrun)? "dry run of " : ""),
(is_query? "read-only " : ""), op);
CRM_CHECK(output != NULL, return -ENOMSG);
+ CRM_CHECK(current_cib != NULL, return -ENOMSG);
CRM_CHECK(result_cib != NULL, return -ENOMSG);
CRM_CHECK(config_changed != NULL, return -ENOMSG);
@@ -173,25 +371,26 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
}
*result_cib = NULL;
- *config_changed = FALSE;
+ *config_changed = false;
if (fn == NULL) {
return -EINVAL;
}
if (is_query) {
- xmlNode *cib_ro = current_cib;
+ xmlNode *cib_ro = *current_cib;
xmlNode *cib_filtered = NULL;
- if(cib_acl_enabled(cib_ro, user)) {
- if(xml_acl_filtered_copy(user, current_cib, current_cib, &cib_filtered)) {
- if (cib_filtered == NULL) {
- crm_debug("Pre-filtered the entire cib");
- return -EACCES;
- }
- cib_ro = cib_filtered;
- crm_log_xml_trace(cib_ro, "filtered");
+ if (cib_acl_enabled(cib_ro, user)
+ && xml_acl_filtered_copy(user, *current_cib, *current_cib,
+ &cib_filtered)) {
+
+ if (cib_filtered == NULL) {
+ crm_debug("Pre-filtered the entire cib");
+ return -EACCES;
}
+ cib_ro = cib_filtered;
+ crm_log_xml_trace(cib_ro, "filtered");
}
rc = (*fn) (op, call_options, section, req, input, cib_ro, result_cib, output);
@@ -202,14 +401,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
} else if(cib_filtered == *output) {
cib_filtered = NULL; /* Let them have this copy */
- } else if(*output == current_cib) {
+ } else if (*output == *current_cib) {
/* They already know not to free it */
} else if(cib_filtered && (*output)->doc == cib_filtered->doc) {
/* We're about to free the document of which *output is a part */
*output = copy_xml(*output);
- } else if((*output)->doc == current_cib->doc) {
+ } else if ((*output)->doc == (*current_cib)->doc) {
/* Give them a copy they can free */
*output = copy_xml(*output);
}
@@ -218,31 +417,41 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
return rc;
}
+ make_copy = should_copy_cib(op, section, call_options);
- if (pcmk_is_set(call_options, cib_zero_copy)) {
+ if (!make_copy) {
/* Conditional on v2 patch style */
- scratch = current_cib;
+ scratch = *current_cib;
- /* Create a shallow copy of current_cib for the version details */
- current_cib = create_xml_node(NULL, (const char *)scratch->name);
- copy_in_properties(current_cib, scratch);
- top = current_cib;
+ // Make a copy of the top-level element to store version details
+ top = create_xml_node(NULL, (const char *) scratch->name);
+ copy_in_properties(top, scratch);
+ patchset_cib = top;
xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user));
rc = (*fn) (op, call_options, section, req, input, scratch, &scratch, output);
+ /* If scratch points to a new object now (for example, after an erase
+ * operation), then *current_cib should point to the same object.
+ */
+ *current_cib = scratch;
+
} else {
- scratch = copy_xml(current_cib);
+ scratch = copy_xml(*current_cib);
+ patchset_cib = *current_cib;
+
xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user));
- rc = (*fn) (op, call_options, section, req, input, current_cib, &scratch, output);
+ rc = (*fn) (op, call_options, section, req, input, *current_cib,
+ &scratch, output);
- if(scratch && xml_tracking_changes(scratch) == FALSE) {
+ if ((scratch != NULL) && !xml_tracking_changes(scratch)) {
crm_trace("Inferring changes after %s op", op);
- xml_track_changes(scratch, user, current_cib, cib_acl_enabled(current_cib, user));
- xml_calculate_changes(current_cib, scratch);
+ xml_track_changes(scratch, user, *current_cib,
+ cib_acl_enabled(*current_cib, user));
+ xml_calculate_changes(*current_cib, scratch);
}
- CRM_CHECK(current_cib != scratch, return -EINVAL);
+ CRM_CHECK(*current_cib != scratch, return -EINVAL);
}
xml_acl_disable(scratch); /* Allow the system to make any additional changes */
@@ -271,12 +480,12 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
}
}
- if (current_cib) {
+ if (patchset_cib != NULL) {
int old = 0;
int new = 0;
crm_element_value_int(scratch, XML_ATTR_GENERATION_ADMIN, &new);
- crm_element_value_int(current_cib, XML_ATTR_GENERATION_ADMIN, &old);
+ crm_element_value_int(patchset_cib, XML_ATTR_GENERATION_ADMIN, &old);
if (old > new) {
crm_err("%s went backwards: %d -> %d (Opts: %#x)",
@@ -287,7 +496,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
} else if (old == new) {
crm_element_value_int(scratch, XML_ATTR_GENERATION, &new);
- crm_element_value_int(current_cib, XML_ATTR_GENERATION, &old);
+ crm_element_value_int(patchset_cib, XML_ATTR_GENERATION, &old);
if (old > new) {
crm_err("%s went backwards: %d -> %d (Opts: %#x)",
XML_ATTR_GENERATION, old, new, call_options);
@@ -302,13 +511,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
pcmk__strip_xml_text(scratch);
fix_plus_plus_recursive(scratch);
- if (pcmk_is_set(call_options, cib_zero_copy)) {
- /* At this point, current_cib is just the 'cib' tag and its properties,
+ if (!make_copy) {
+ /* At this point, patchset_cib is just the "cib" tag and its properties.
*
* The v1 format would barf on this, but we know the v2 patch
* format only needs it for the top-level version fields
*/
- local_diff = xml_create_patchset(2, current_cib, scratch, (bool*)config_changed, manage_counters);
+ local_diff = xml_create_patchset(2, patchset_cib, scratch,
+ config_changed, manage_counters);
} else {
static time_t expires = 0;
@@ -316,63 +526,38 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
if (expires < tm_now) {
expires = tm_now + 60; /* Validate clients are correctly applying v2-style diffs at most once a minute */
- with_digest = TRUE;
+ with_digest = true;
}
- local_diff = xml_create_patchset(0, current_cib, scratch, (bool*)config_changed, manage_counters);
+ local_diff = xml_create_patchset(0, patchset_cib, scratch,
+ config_changed, manage_counters);
}
- // Create a log output object only if we're going to use it
- pcmk__if_tracing(
- {
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
-
- pcmk__output_set_log_level(out, LOG_TRACE);
- out_rc = pcmk__xml_show_changes(out, scratch);
- },
- {}
- );
+ pcmk__log_xml_changes(LOG_TRACE, scratch);
xml_accept_changes(scratch);
if(local_diff) {
- int temp_rc = pcmk_rc_no_output;
-
- patchset_process_digest(local_diff, current_cib, scratch, with_digest);
-
- if (out == NULL) {
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
- }
- pcmk__output_set_log_level(out, LOG_INFO);
- temp_rc = out->message(out, "xml-patchset", local_diff);
- out_rc = pcmk__output_select_rc(rc, temp_rc);
-
+ patchset_process_digest(local_diff, patchset_cib, scratch, with_digest);
+ pcmk__log_xml_patchset(LOG_INFO, local_diff);
crm_log_xml_trace(local_diff, "raw patch");
}
- if (out != NULL) {
- out->finish(out, pcmk_rc2exitc(out_rc), true, NULL);
- pcmk__output_free(out);
- out = NULL;
- }
-
- if (!pcmk_is_set(call_options, cib_zero_copy) && (local_diff != NULL)) {
+ if (make_copy && (local_diff != NULL)) {
// Original to compare against doesn't exist
pcmk__if_tracing(
{
// Validate the calculated patch set
int test_rc = pcmk_ok;
int format = 1;
- xmlNode *cib_copy = copy_xml(current_cib);
+ xmlNode *cib_copy = copy_xml(patchset_cib);
- crm_element_value_int(local_diff, "format", &format);
+ crm_element_value_int(local_diff, PCMK_XA_FORMAT, &format);
test_rc = xml_apply_patchset(cib_copy, local_diff,
manage_counters);
if (test_rc != pcmk_ok) {
save_xml_to_file(cib_copy, "PatchApply:calculated", NULL);
- save_xml_to_file(current_cib, "PatchApply:input", NULL);
+ save_xml_to_file(patchset_cib, "PatchApply:input", NULL);
save_xml_to_file(scratch, "PatchApply:actual", NULL);
save_xml_to_file(local_diff, "PatchApply:diff", NULL);
crm_err("v%d patchset error, patch failed to apply: %s "
@@ -391,7 +576,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
     * a) we don't really care what's in the status section
* b) we don't validate any of its contents at the moment anyway
*/
- check_schema = FALSE;
+ check_schema = false;
}
/* === scratch must not be modified after this point ===
@@ -420,19 +605,35 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
/* Does the CIB support the "update-*" attributes... */
if (current_schema >= minimum_schema) {
+ /* Ensure values of origin, client, and user in scratch match
+ * the values in req
+ */
const char *origin = crm_element_value(req, F_ORIG);
+ const char *client = crm_element_value(req, F_CIB_CLIENTNAME);
+
+ if (origin != NULL) {
+ crm_xml_add(scratch, XML_ATTR_UPDATE_ORIG, origin);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_ORIG);
+ }
- CRM_LOG_ASSERT(origin != NULL);
- crm_xml_replace(scratch, XML_ATTR_UPDATE_ORIG, origin);
- crm_xml_replace(scratch, XML_ATTR_UPDATE_CLIENT,
- crm_element_value(req, F_CIB_CLIENTNAME));
- crm_xml_replace(scratch, XML_ATTR_UPDATE_USER, crm_element_value(req, F_CIB_USER));
+ if (client != NULL) {
+            crm_xml_add(scratch, XML_ATTR_UPDATE_CLIENT, client);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_CLIENT);
+ }
+
+ if (user != NULL) {
+ crm_xml_add(scratch, XML_ATTR_UPDATE_USER, user);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_USER);
+ }
}
}
}
crm_trace("Perform validation: %s", pcmk__btoa(check_schema));
- if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, TRUE)) {
+ if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, true)) {
const char *current_schema = crm_element_value(scratch,
XML_ATTR_VALIDATION);
@@ -444,13 +645,17 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
done:
*result_cib = scratch;
- if(rc != pcmk_ok && cib_acl_enabled(current_cib, user)) {
- if(xml_acl_filtered_copy(user, current_cib, scratch, result_cib)) {
- if (*result_cib == NULL) {
- crm_debug("Pre-filtered the entire cib result");
- }
- free_xml(scratch);
+
+ /* @TODO: This may not work correctly with !make_copy, since we don't
+ * keep the original CIB.
+ */
+ if ((rc != pcmk_ok) && cib_acl_enabled(patchset_cib, user)
+ && xml_acl_filtered_copy(user, patchset_cib, scratch, result_cib)) {
+
+ if (*result_cib == NULL) {
+ crm_debug("Pre-filtered the entire cib result");
}
+ free_xml(scratch);
}
if(diff) {
@@ -464,36 +669,117 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
return rc;
}
-xmlNode *
-cib_create_op(int call_id, const char *op, const char *host,
- const char *section, xmlNode *data, int call_options,
- const char *user_name)
+int
+cib__create_op(cib_t *cib, const char *op, const char *host,
+ const char *section, xmlNode *data, int call_options,
+ const char *user_name, const char *client_name,
+ xmlNode **op_msg)
{
- xmlNode *op_msg = create_xml_node(NULL, "cib_command");
+ CRM_CHECK((cib != NULL) && (op_msg != NULL), return -EPROTO);
- CRM_CHECK(op_msg != NULL, return NULL);
-
- crm_xml_add(op_msg, F_XML_TAGNAME, "cib_command");
+ *op_msg = create_xml_node(NULL, T_CIB_COMMAND);
+ if (*op_msg == NULL) {
+ return -EPROTO;
+ }
- crm_xml_add(op_msg, F_TYPE, T_CIB);
- crm_xml_add(op_msg, F_CIB_OPERATION, op);
- crm_xml_add(op_msg, F_CIB_HOST, host);
- crm_xml_add(op_msg, F_CIB_SECTION, section);
- crm_xml_add_int(op_msg, F_CIB_CALLID, call_id);
- if (user_name) {
- crm_xml_add(op_msg, F_CIB_USER, user_name);
+ cib->call_id++;
+ if (cib->call_id < 1) {
+ cib->call_id = 1;
}
+
+ crm_xml_add(*op_msg, F_XML_TAGNAME, T_CIB_COMMAND);
+ crm_xml_add(*op_msg, F_TYPE, T_CIB);
+ crm_xml_add(*op_msg, F_CIB_OPERATION, op);
+ crm_xml_add(*op_msg, F_CIB_HOST, host);
+ crm_xml_add(*op_msg, F_CIB_SECTION, section);
+ crm_xml_add(*op_msg, F_CIB_USER, user_name);
+ crm_xml_add(*op_msg, F_CIB_CLIENTNAME, client_name);
+ crm_xml_add_int(*op_msg, F_CIB_CALLID, cib->call_id);
+
crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
- crm_xml_add_int(op_msg, F_CIB_CALLOPTS, call_options);
+ crm_xml_add_int(*op_msg, F_CIB_CALLOPTS, call_options);
if (data != NULL) {
- add_message_xml(op_msg, F_CIB_CALLDATA, data);
+ add_message_xml(*op_msg, F_CIB_CALLDATA, data);
}
- if (call_options & cib_inhibit_bcast) {
- CRM_CHECK((call_options & cib_scope_local), return NULL);
+ if (pcmk_is_set(call_options, cib_inhibit_bcast)) {
+ CRM_CHECK(pcmk_is_set(call_options, cib_scope_local),
+ free_xml(*op_msg); return -EPROTO);
}
- return op_msg;
+ return pcmk_ok;
+}
+
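/* A minimal sketch of how a cib_t variant can use cib__create_op(), mirroring
 * cib_remote_perform_op() earlier in this patch. The transport step is left
 * as a comment because each variant (native IPC, remote TLS, file) supplies
 * its own; it is a placeholder, not a real libcib call.
 */
#include <crm/cib.h>
#include <crm/cib/internal.h>

static int
example_build_request(cib_t *cib, const char *op, const char *section,
                      xmlNode *data, int call_options)
{
    xmlNode *op_msg = NULL;
    int rc = cib__create_op(cib, op, NULL /* host */, section, data,
                            call_options, NULL /* user */,
                            NULL /* client name */, &op_msg);

    if (rc != pcmk_ok) {
        return rc;                          // Legacy Pacemaker return code
    }

    if (pcmk_is_set(call_options, cib_transaction)) {
        // Queue in the client's transaction instead of sending immediately
        rc = cib__extend_transaction(cib, op_msg);

    } else {
        // rc = ... send op_msg via the variant's own transport ...
    }

    free_xml(op_msg);
    return rc;
}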
+/*!
+ * \internal
+ * \brief Check whether a CIB request is supported in a transaction
+ *
+ * \param[in] request CIB request
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+validate_transaction_request(const xmlNode *request)
+{
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *host = crm_element_value(request, F_CIB_HOST);
+ const cib__operation_t *operation = NULL;
+ int rc = cib__get_operation(op, &operation);
+
+ if (rc != pcmk_rc_ok) {
+ // cib__get_operation() logs error
+ return rc;
+ }
+
+ if (!pcmk_is_set(operation->flags, cib__op_attr_transaction)) {
+ crm_err("Operation %s is not supported in CIB transactions", op);
+ return EOPNOTSUPP;
+ }
+
+ if (host != NULL) {
+ crm_err("Operation targeting a specific node (%s) is not supported in "
+ "a CIB transaction",
+ host);
+ return EOPNOTSUPP;
+ }
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Append a CIB request to a CIB transaction
+ *
+ * \param[in,out] cib CIB client whose transaction to extend
+ * \param[in,out] request Request to add to transaction
+ *
+ * \return Legacy Pacemaker return code
+ */
+int
+cib__extend_transaction(cib_t *cib, xmlNode *request)
+{
+ int rc = pcmk_rc_ok;
+
+ CRM_ASSERT((cib != NULL) && (request != NULL));
+
+ rc = validate_transaction_request(request);
+
+ if ((rc == pcmk_rc_ok) && (cib->transaction == NULL)) {
+ rc = pcmk_rc_no_transaction;
+ }
+
+ if (rc == pcmk_rc_ok) {
+ add_node_copy(cib->transaction, request);
+
+ } else {
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *client_id = NULL;
+
+ cib->cmds->client_id(cib, NULL, &client_id);
+ crm_err("Failed to add '%s' operation to transaction for client %s: %s",
+ op, pcmk__s(client_id, "(unidentified)"), pcmk_rc_str(rc));
+ crm_log_xml_info(request, "failed");
+ }
+ return pcmk_rc2legacy(rc);
}
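/* An illustrative client-side flow built on the helpers above. The
 * init_transaction() and end_transaction() methods are assumed to be the
 * cib_t API entry points that create and commit (or discard) the
 * cib->transaction document; requests issued with the cib_transaction call
 * option are routed through cib__extend_transaction() instead of being sent
 * to the CIB manager one at a time.
 */
#include <crm/cib.h>
#include <crm/msg_xml.h>

static int
example_transactional_update(cib_t *cib, xmlNode *crm_config_update)
{
    int rc = cib->cmds->init_transaction(cib);

    if (rc != pcmk_ok) {
        return rc;
    }

    // Queued in cib->transaction rather than dispatched immediately
    rc = cib->cmds->modify(cib, XML_CIB_TAG_CRMCONFIG, crm_config_update,
                           cib_transaction);

    // Commit the whole batch on success; discard it on failure
    return cib->cmds->end_transaction(cib, (rc == pcmk_ok), cib_none);
}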
void
@@ -701,16 +987,7 @@ cib_apply_patch_event(xmlNode *event, xmlNode *input, xmlNode **output,
}
if (level > LOG_CRIT) {
- pcmk__output_t *out = NULL;
-
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, return rc);
-
- pcmk__output_set_log_level(out, level);
- rc = out->message(out, "xml-patchset", diff);
- out->finish(out, pcmk_rc2exitc(rc), true, NULL);
- pcmk__output_free(out);
- rc = pcmk_ok;
+ pcmk__log_xml_patchset(level, diff);
}
if (input != NULL) {
diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am
index 9225f29..2ddbffb 100644
--- a/lib/cluster/Makefile.am
+++ b/lib/cluster/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2018 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,17 +13,20 @@ noinst_HEADERS = crmcluster_private.h
## libraries
lib_LTLIBRARIES = libcrmcluster.la
-libcrmcluster_la_LDFLAGS = -version-info 30:0:1
+libcrmcluster_la_LDFLAGS = -version-info 31:0:2
libcrmcluster_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcrmcluster_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS)
+libcrmcluster_la_LIBADD = $(top_builddir)/lib/fencing/libstonithd.la
+libcrmcluster_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
+libcrmcluster_la_LIBADD += $(CLUSTERLIBS)
-libcrmcluster_la_SOURCES = election.c cluster.c membership.c
+## Library sources (*must* use += format for bumplibs)
+libcrmcluster_la_SOURCES = cluster.c
+libcrmcluster_la_SOURCES += election.c
+libcrmcluster_la_SOURCES += membership.c
if BUILD_CS_SUPPORT
-libcrmcluster_la_SOURCES += cpg.c corosync.c
+libcrmcluster_la_SOURCES += corosync.c
+libcrmcluster_la_SOURCES += cpg.c
endif
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
diff --git a/lib/cluster/cluster.c b/lib/cluster/cluster.c
index 011e053..f2cd428 100644
--- a/lib/cluster/cluster.c
+++ b/lib/cluster/cluster.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -160,7 +160,7 @@ pcmk_cluster_free(crm_cluster_t *cluster)
*/
gboolean
send_cluster_message(const crm_node_t *node, enum crm_ais_msg_types service,
- xmlNode *data, gboolean ordered)
+ const xmlNode *data, gboolean ordered)
{
switch (get_cluster_type()) {
case pcmk_cluster_corosync:
@@ -280,7 +280,7 @@ crm_peer_uname(const char *uuid)
return NULL;
}
- node = pcmk__search_cluster_node_cache((uint32_t) id, NULL);
+ node = pcmk__search_cluster_node_cache((uint32_t) id, NULL, NULL);
if (node != NULL) {
crm_info("Setting uuid for node %s[%u] to %s",
node->uname, node->id, uuid);
@@ -294,19 +294,6 @@ crm_peer_uname(const char *uuid)
}
/*!
- * \brief Add a node's UUID as an XML attribute
- *
- * \param[in,out] xml XML element to add UUID to
- * \param[in] attr XML attribute name to set
- * \param[in,out] node Node whose UUID should be used as attribute value
- */
-void
-set_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
-{
- crm_xml_add(xml, attr, crm_peer_uuid(node));
-}
-
-/*!
* \brief Get a log-friendly string equivalent of a cluster type
*
* \param[in] type Cluster type
@@ -403,3 +390,17 @@ is_corosync_cluster(void)
{
return get_cluster_type() == pcmk_cluster_corosync;
}
+
+// Deprecated functions kept only for backward API compatibility
+// LCOV_EXCL_START
+
+#include <crm/cluster/compat.h>
+
+void
+set_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
+{
+ crm_xml_add(xml, attr, crm_peer_uuid(node));
+}
+
+// LCOV_EXCL_STOP
+// End deprecated API
diff --git a/lib/cluster/cpg.c b/lib/cluster/cpg.c
index 2af4a50..d1decc6 100644
--- a/lib/cluster/cpg.c
+++ b/lib/cluster/cpg.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -506,14 +506,14 @@ pcmk_message_common_cs(cpg_handle_t handle, uint32_t nodeid, uint32_t pid, void
uncompressed = calloc(1, new_size);
rc = BZ2_bzBuffToBuffDecompress(uncompressed, &new_size, msg->data, msg->compressed_size, 1, 0);
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc);
free(uncompressed);
goto badmsg;
}
- CRM_ASSERT(rc == BZ_OK);
CRM_ASSERT(new_size == msg->size);
data = uncompressed;
@@ -628,7 +628,7 @@ node_left(const char *cpg_group_name, int event_counter,
size_t member_list_entries)
{
crm_node_t *peer = pcmk__search_cluster_node_cache(cpg_peer->nodeid,
- NULL);
+ NULL, NULL);
const struct cpg_address **rival = NULL;
/* Most CPG-related Pacemaker code assumes that only one process on a node
@@ -888,11 +888,11 @@ cluster_connect_cpg(crm_cluster_t *cluster)
*
* \return TRUE on success, otherwise FALSE
*/
-gboolean
-pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node,
+bool
+pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node,
enum crm_ais_msg_types dest)
{
- gboolean rc = TRUE;
+ bool rc = true;
char *data = NULL;
data = dump_xml_unformatted(msg);
diff --git a/lib/cluster/crmcluster_private.h b/lib/cluster/crmcluster_private.h
index 6933b73..370bca5 100644
--- a/lib/cluster/crmcluster_private.h
+++ b/lib/cluster/crmcluster_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -41,7 +41,7 @@ G_GNUC_INTERNAL
void pcmk__corosync_disconnect(crm_cluster_t *cluster);
G_GNUC_INTERNAL
-gboolean pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node,
- enum crm_ais_msg_types dest);
+bool pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node,
+ enum crm_ais_msg_types dest);
#endif // PCMK__CRMCLUSTER_PRIVATE__H
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index 0c54f19..f856cca 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -157,7 +157,7 @@ crm_remote_peer_cache_remove(const char *node_name)
*
* \param[in] node_state XML of node state
*
- * \return CRM_NODE_LOST if XML_NODE_IN_CLUSTER is false in node_state,
+ * \return CRM_NODE_LOST if PCMK__XA_IN_CCM is false in node_state,
* CRM_NODE_MEMBER otherwise
* \note Unlike most boolean XML attributes, this one defaults to true, for
* backward compatibility with older controllers that don't set it.
@@ -167,7 +167,8 @@ remote_state_from_cib(const xmlNode *node_state)
{
bool status = false;
- if (pcmk__xe_get_bool_attr(node_state, XML_NODE_IN_CLUSTER, &status) == pcmk_rc_ok && !status) {
+ if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM,
+ &status) == pcmk_rc_ok) && !status) {
return CRM_NODE_LOST;
} else {
return CRM_NODE_MEMBER;
@@ -515,7 +516,7 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
}
if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) {
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, NULL);
}
return node;
}
@@ -525,12 +526,15 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
* \param[in] flags Bitmask of enum crm_get_peer_flags
*
* \return (Possibly newly created) node cache entry
*/
crm_node_t *
-crm_get_peer_full(unsigned int id, const char *uname, int flags)
+pcmk__get_peer_full(unsigned int id, const char *uname, const char *uuid,
+ int flags)
{
crm_node_t *node = NULL;
@@ -543,22 +547,40 @@ crm_get_peer_full(unsigned int id, const char *uname, int flags)
}
if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) {
- node = crm_get_peer(id, uname);
+ node = pcmk__get_peer(id, uname, uuid);
}
return node;
}
/*!
+ * \brief Get a node cache entry (cluster or Pacemaker Remote)
+ *
+ * \param[in] id If not 0, cluster node ID to search for
+ * \param[in] uname If not NULL, node name to search for
+ * \param[in] flags Bitmask of enum crm_get_peer_flags
+ *
+ * \return (Possibly newly created) node cache entry
+ */
+crm_node_t *
+crm_get_peer_full(unsigned int id, const char *uname, int flags)
+{
+ return pcmk__get_peer_full(id, uname, NULL, flags);
+}
+
+/*!
* \internal
* \brief Search cluster node cache
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
*
* \return Cluster node cache entry if found, otherwise NULL
*/
crm_node_t *
-pcmk__search_cluster_node_cache(unsigned int id, const char *uname)
+pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
+ const char *uuid)
{
GHashTableIter iter;
crm_node_t *node = NULL;
@@ -589,6 +611,16 @@ pcmk__search_cluster_node_cache(unsigned int id, const char *uname)
break;
}
}
+
+ } else if (uuid != NULL) {
+ g_hash_table_iter_init(&iter, crm_peer_cache);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
+ if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) {
+ crm_trace("UUID match: %s = %p", node->uuid, node);
+ by_id = node;
+ break;
+ }
+ }
}
node = by_id; /* Good default */
@@ -693,12 +725,14 @@ remove_conflicting_peer(crm_node_t *node)
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
*
* \return (Possibly newly created) cluster node cache entry
*/
/* coverity[-alloc] Memory is referenced in one or both hashtables */
crm_node_t *
-crm_get_peer(unsigned int id, const char *uname)
+pcmk__get_peer(unsigned int id, const char *uname, const char *uuid)
{
crm_node_t *node = NULL;
char *uname_lookup = NULL;
@@ -707,7 +741,7 @@ crm_get_peer(unsigned int id, const char *uname)
crm_peer_init();
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, uuid);
/* if uname wasn't provided, and find_peer did not turn up a uname based on id.
* we need to do a lookup of the node name using the id in the cluster membership. */
@@ -721,7 +755,7 @@ crm_get_peer(unsigned int id, const char *uname)
/* try to turn up the node one more time now that we know the uname. */
if (node == NULL) {
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, uuid);
}
}
@@ -750,7 +784,9 @@ crm_get_peer(unsigned int id, const char *uname)
}
if(node->uuid == NULL) {
- const char *uuid = crm_peer_uuid(node);
+ if (uuid == NULL) {
+ uuid = crm_peer_uuid(node);
+ }
if (uuid) {
crm_info("Node %u has uuid %s", id, uuid);
@@ -766,6 +802,21 @@ crm_get_peer(unsigned int id, const char *uname)
}
/*!
+ * \brief Get a cluster node cache entry
+ *
+ * \param[in] id If not 0, cluster node ID to search for
+ * \param[in] uname If not NULL, node name to search for
+ *
+ * \return (Possibly newly created) cluster node cache entry
+ */
+/* coverity[-alloc] Memory is referenced in one or both hashtables */
+crm_node_t *
+crm_get_peer(unsigned int id, const char *uname)
+{
+ return pcmk__get_peer(id, uname, NULL);
+}
+
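/* A minimal sketch of the new lookup-by-UUID path: with id set to 0 and a
 * non-NULL uuid, pcmk__get_peer_full() (via pcmk__search_cluster_node_cache())
 * matches cluster peers on node->uuid rather than on the numeric node ID.
 * The internal header shown is the assumed home of these declarations.
 */
#include <crm/cluster.h>
#include <crm/cluster/internal.h>

static crm_node_t *
example_lookup_by_uuid(const char *uuid)
{
    // id 0 and uname NULL skip those checks; search cluster nodes only
    return pcmk__get_peer_full(0, NULL, uuid, CRM_GET_PEER_CLUSTER);
}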
+/*!
* \internal
* \brief Update a node's uname
*
@@ -917,6 +968,13 @@ crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const
proc2text(flag), status);
}
+ if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
+ node->when_online = time(NULL);
+
+ } else {
+ node->when_online = 0;
+ }
+
/* Call the client callback first, then update the peer state,
* in case the node will be reaped
*/
@@ -1025,6 +1083,13 @@ update_peer_state_iter(const char *source, crm_node_t *node, const char *state,
if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) {
char *last = node->state;
+ if (is_member) {
+ node->when_member = time(NULL);
+
+ } else {
+ node->when_member = 0;
+ }
+
node->state = strdup(state);
crm_notice("Node %s state is now %s " CRM_XS
" nodeid=%u previous=%s source=%s", node->uname, state,
diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
index ef729d4..f9c43b9 100644
--- a/lib/common/Makefile.am
+++ b/lib/common/Makefile.am
@@ -8,7 +8,8 @@
#
include $(top_srcdir)/mk/common.mk
-AM_CPPFLAGS += -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu
+AM_CPPFLAGS += -I$(top_builddir)/lib/gnu \
+ -I$(top_srcdir)/lib/gnu
## libraries
lib_LTLIBRARIES = libcrmcommon.la
@@ -29,14 +30,16 @@ CFLAGS = $(CFLAGS_COPY:-Wcast-qual=) -fPIC
# changes the order so the subdirectories are processed afterwards.
SUBDIRS = . tests
-noinst_HEADERS = crmcommon_private.h mock_private.h
+noinst_HEADERS = crmcommon_private.h \
+ mock_private.h
-libcrmcommon_la_LDFLAGS = -version-info 45:0:11
+libcrmcommon_la_LDFLAGS = -version-info 46:0:12
libcrmcommon_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcrmcommon_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmcommon_la_LIBADD = @LIBADD_DL@ $(top_builddir)/lib/gnu/libgnu.la
+libcrmcommon_la_LIBADD = @LIBADD_DL@ \
+ $(top_builddir)/lib/gnu/libgnu.la
# If configured with --with-profiling or --with-coverage, BUILD_PROFILING will
# be set and -fno-builtin will be added to the CFLAGS. However, libcrmcommon
@@ -47,9 +50,10 @@ if BUILD_PROFILING
libcrmcommon_la_LIBADD += -lm
endif
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+## Library sources (*must* use += format for bumplibs)
libcrmcommon_la_SOURCES =
libcrmcommon_la_SOURCES += acl.c
+libcrmcommon_la_SOURCES += actions.c
libcrmcommon_la_SOURCES += agents.c
libcrmcommon_la_SOURCES += alerts.c
libcrmcommon_la_SOURCES += attrs.c
@@ -75,7 +79,6 @@ libcrmcommon_la_SOURCES += mainloop.c
libcrmcommon_la_SOURCES += messages.c
libcrmcommon_la_SOURCES += nodes.c
libcrmcommon_la_SOURCES += nvpair.c
-libcrmcommon_la_SOURCES += operations.c
libcrmcommon_la_SOURCES += options.c
libcrmcommon_la_SOURCES += output.c
libcrmcommon_la_SOURCES += output_html.c
@@ -89,12 +92,14 @@ libcrmcommon_la_SOURCES += pid.c
libcrmcommon_la_SOURCES += procfs.c
libcrmcommon_la_SOURCES += remote.c
libcrmcommon_la_SOURCES += results.c
+libcrmcommon_la_SOURCES += scheduler.c
libcrmcommon_la_SOURCES += schemas.c
libcrmcommon_la_SOURCES += scores.c
libcrmcommon_la_SOURCES += strings.c
libcrmcommon_la_SOURCES += utils.c
libcrmcommon_la_SOURCES += watchdog.c
libcrmcommon_la_SOURCES += xml.c
+libcrmcommon_la_SOURCES += xml_attr.c
libcrmcommon_la_SOURCES += xml_display.c
libcrmcommon_la_SOURCES += xpath.c
@@ -107,18 +112,22 @@ include $(top_srcdir)/mk/tap.mk
libcrmcommon_test_la_SOURCES = $(libcrmcommon_la_SOURCES)
libcrmcommon_test_la_SOURCES += mock.c
-libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# If GCC emits a builtin function in place of something we've mocked up, that will
# get used instead of the mocked version which leads to unexpected test results. So
# disable all builtins. Older versions of GCC (at least, on RHEL7) will still emit
# replacement code for strdup (and possibly other functions) unless -fno-inline is
# also added.
-libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) -DPCMK__UNIT_TESTING -fno-builtin -fno-inline
+libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
+ -fno-builtin \
+ -fno-inline
# If -fno-builtin is used, -lm also needs to be added. See the comment at
# BUILD_PROFILING above.
-libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) -lcmocka -lm
+libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) \
+ -lcmocka \
+ -lm
nodist_libcrmcommon_test_la_SOURCES = $(nodist_libcrmcommon_la_SOURCES)
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
diff --git a/lib/common/acl.c b/lib/common/acl.c
index 33a4e00..1ebd765 100644
--- a/lib/common/acl.c
+++ b/lib/common/acl.c
@@ -26,7 +26,7 @@
typedef struct xml_acl_s {
enum xml_private_flags mode;
- char *xpath;
+ gchar *xpath;
} xml_acl_t;
static void
@@ -35,7 +35,7 @@ free_acl(void *data)
if (data) {
xml_acl_t *acl = data;
- free(acl->xpath);
+ g_free(acl->xpath);
free(acl);
}
}
@@ -68,7 +68,7 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) {
// Schema should prevent this, but to be safe ...
crm_trace("Ignoring ACL <%s> element without selection criteria",
- crm_element_name(xml));
+ xml->name);
return NULL;
}
@@ -77,10 +77,9 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
acl->mode = mode;
if (xpath) {
- acl->xpath = strdup(xpath);
- CRM_ASSERT(acl->xpath != NULL);
+ acl->xpath = g_strdup(xpath);
crm_trace("Unpacked ACL <%s> element using xpath: %s",
- crm_element_name(xml), acl->xpath);
+ xml->name, acl->xpath);
} else {
GString *buf = g_string_sized_new(128);
@@ -101,12 +100,11 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), NULL);
}
- acl->xpath = strdup((const char *) buf->str);
- CRM_ASSERT(acl->xpath != NULL);
+ acl->xpath = buf->str;
- g_string_free(buf, TRUE);
+ g_string_free(buf, FALSE);
crm_trace("Unpacked ACL <%s> element as xpath: %s",
- crm_element_name(xml), acl->xpath);
+ xml->name, acl->xpath);
}
return g_list_append(acls, acl);
@@ -131,10 +129,10 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls)
for (child = pcmk__xe_first_child(acl_entry); child;
child = pcmk__xe_next(child)) {
- const char *tag = crm_element_name(child);
+ const char *tag = (const char *) child->name;
const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND);
- if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){
+ if (pcmk__xe_is(child, XML_ACL_TAG_PERMISSION)) {
CRM_ASSERT(kind != NULL);
crm_trace("Unpacking ACL <%s> element of kind '%s'", tag, kind);
tag = kind;
@@ -157,7 +155,7 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls)
if (role_id && strcmp(ref_role, role_id) == 0) {
crm_trace("Unpacking referenced role '%s' in ACL <%s> element",
- role_id, crm_element_name(acl_entry));
+ role_id, acl_entry->name);
acls = parse_acl_entry(acl_top, role, acls);
break;
}
@@ -304,10 +302,9 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user)
for (child = pcmk__xe_first_child(acls); child;
child = pcmk__xe_next(child)) {
- const char *tag = crm_element_name(child);
- if (!strcmp(tag, XML_ACL_TAG_USER)
- || !strcmp(tag, XML_ACL_TAG_USERv1)) {
+ if (pcmk__xe_is(child, XML_ACL_TAG_USER)
+ || pcmk__xe_is(child, XML_ACL_TAG_USERv1)) {
const char *id = crm_element_value(child, XML_ATTR_NAME);
if (id == NULL) {
@@ -318,7 +315,7 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user)
crm_debug("Unpacking ACLs for user '%s'", id);
docpriv->acls = parse_acl_entry(acls, child, docpriv->acls);
}
- } else if (!strcmp(tag, XML_ACL_TAG_GROUP)) {
+ } else if (pcmk__xe_is(child, XML_ACL_TAG_GROUP)) {
const char *id = crm_element_value(child, XML_ATTR_NAME);
if (id == NULL) {
@@ -392,7 +389,7 @@ purge_xml_attributes(xmlNode *xml)
if (test_acl_mode(nodepriv->flags, pcmk__xf_acl_read)) {
crm_trace("%s[@" XML_ATTR_ID "=%s] is readable",
- crm_element_name(xml), ID(xml));
+ xml->name, ID(xml));
return true;
}
@@ -571,22 +568,22 @@ pcmk__apply_creation_acl(xmlNode *xml, bool check_top)
if (implicitly_allowed(xml)) {
crm_trace("Creation of <%s> scaffolding with id=\"%s\""
" is implicitly allowed",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
} else if (pcmk__check_acl(xml, NULL, pcmk__xf_acl_write)) {
crm_trace("ACLs allow creation of <%s> with id=\"%s\"",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
} else if (check_top) {
crm_trace("ACLs disallow creation of <%s> with id=\"%s\"",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
pcmk_free_xml_subtree(xml);
return;
} else {
crm_notice("ACLs would disallow creation of %s<%s> with id=\"%s\"",
((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""),
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
}
}
diff --git a/lib/common/operations.c b/lib/common/actions.c
index 3db96cd..e710615 100644
--- a/lib/common/operations.c
+++ b/lib/common/actions.c
@@ -107,15 +107,15 @@ parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
* contain underbars. Here, list action names and name prefixes that can.
*/
const char *actions_with_underbars[] = {
- CRMD_ACTION_MIGRATED,
- CRMD_ACTION_MIGRATE,
+ PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_MIGRATE_TO,
NULL
};
const char *action_prefixes_with_underbars[] = {
- "pre_" CRMD_ACTION_NOTIFY,
- "post_" CRMD_ACTION_NOTIFY,
- "confirmed-pre_" CRMD_ACTION_NOTIFY,
- "confirmed-post_" CRMD_ACTION_NOTIFY,
+ "pre_" PCMK_ACTION_NOTIFY,
+ "post_" PCMK_ACTION_NOTIFY,
+ "confirmed-pre_" PCMK_ACTION_NOTIFY,
+ "confirmed-post_" PCMK_ACTION_NOTIFY,
NULL,
};
@@ -470,11 +470,11 @@ crm_op_needs_metadata(const char *rsc_class, const char *op)
}
// Metadata is needed only for these actions
- return pcmk__str_any_of(op, CRMD_ACTION_START, CRMD_ACTION_STATUS,
- CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
- CRMD_ACTION_NOTIFY, NULL);
+ return pcmk__str_any_of(op, PCMK_ACTION_START, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_NOTIFY, NULL);
}
/*!
@@ -488,7 +488,8 @@ crm_op_needs_metadata(const char *rsc_class, const char *op)
bool
pcmk__is_fencing_action(const char *action)
{
- return pcmk__str_any_of(action, "off", "reboot", "poweroff", NULL);
+ return pcmk__str_any_of(action, PCMK_ACTION_OFF, PCMK_ACTION_REBOOT,
+ "poweroff", NULL);
}
bool
@@ -498,7 +499,8 @@ pcmk_is_probe(const char *task, guint interval)
return false;
}
- return (interval == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_none);
+ return (interval == 0)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none);
}
bool
diff --git a/lib/common/alerts.c b/lib/common/alerts.c
index abdadef..98b1e3f 100644
--- a/lib/common/alerts.c
+++ b/lib/common/alerts.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,8 +12,8 @@
#include <crm/lrmd.h>
#include <crm/msg_xml.h>
#include <crm/common/alerts_internal.h>
+#include <crm/common/cib_internal.h>
#include <crm/common/xml_internal.h>
-#include <crm/cib/internal.h> /* for F_CIB_UPDATE_RESULT */
/*
* to allow script compatibility we can have more than one
@@ -168,86 +168,3 @@ pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name,
g_hash_table_insert(table, strdup(*key), pcmk__itoa(value));
}
}
-
-#define XPATH_PATCHSET1_DIFF "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
-
-#define XPATH_PATCHSET1_CRMCONFIG XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_CRMCONFIG
-#define XPATH_PATCHSET1_ALERTS XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_ALERTS
-
-#define XPATH_PATCHSET1_EITHER \
- XPATH_PATCHSET1_CRMCONFIG " | " XPATH_PATCHSET1_ALERTS
-
-#define XPATH_CONFIG "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION
-
-#define XPATH_CRMCONFIG XPATH_CONFIG "/" XML_CIB_TAG_CRMCONFIG "/"
-#define XPATH_ALERTS XPATH_CONFIG "/" XML_CIB_TAG_ALERTS
-
-/*!
- * \internal
- * \brief Check whether a CIB update affects alerts
- *
- * \param[in] msg XML containing CIB update
- * \param[in] config Whether to check for crmconfig change as well
- *
- * \return TRUE if update affects alerts, FALSE otherwise
- */
-bool
-pcmk__alert_in_patchset(xmlNode *msg, bool config)
-{
- int rc = -1;
- int format= 1;
- xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
- xmlNode *change = NULL;
- xmlXPathObject *xpathObj = NULL;
-
- CRM_CHECK(msg != NULL, return FALSE);
-
- crm_element_value_int(msg, F_CIB_RC, &rc);
- if (rc < pcmk_ok) {
- crm_trace("Ignore failed CIB update: %s (%d)", pcmk_strerror(rc), rc);
- return FALSE;
- }
-
- crm_element_value_int(patchset, "format", &format);
- if (format == 1) {
- const char *diff = (config? XPATH_PATCHSET1_EITHER : XPATH_PATCHSET1_ALERTS);
-
- if ((xpathObj = xpath_search(msg, diff)) != NULL) {
- freeXpathObject(xpathObj);
- return TRUE;
- }
- } else if (format == 2) {
- for (change = pcmk__xml_first_child(patchset); change != NULL;
- change = pcmk__xml_next(change)) {
- const char *xpath = crm_element_value(change, XML_DIFF_PATH);
-
- if (xpath == NULL) {
- continue;
- }
-
- if ((!config || !strstr(xpath, XPATH_CRMCONFIG))
- && !strstr(xpath, XPATH_ALERTS)) {
-
- /* this is not a change to an existing section ... */
-
- xmlNode *section = NULL;
- const char *name = NULL;
-
- if ((strcmp(xpath, XPATH_CONFIG) != 0) ||
- ((section = pcmk__xml_first_child(change)) == NULL) ||
- ((name = crm_element_name(section)) == NULL) ||
- (strcmp(name, XML_CIB_TAG_ALERTS) != 0)) {
-
- /* ... nor is it a newly added alerts section */
- continue;
- }
- }
-
- return TRUE;
- }
-
- } else {
- crm_warn("Unknown patch format: %d", format);
- }
- return FALSE;
-}
diff --git a/lib/common/cib.c b/lib/common/cib.c
index b84c5e8..fee7881 100644
--- a/lib/common/cib.c
+++ b/lib/common/cib.c
@@ -1,6 +1,6 @@
/*
* Original copyright 2004 International Business Machines
- * Later changes copyright 2008-2021 the Pacemaker project contributors
+ * Later changes copyright 2008-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,6 +14,8 @@
#include <libxml/tree.h> // xmlNode
#include <crm/msg_xml.h>
+#include <crm/common/cib.h>
+#include <crm/common/cib_internal.h>
/*
* Functions to help find particular sections of the CIB
@@ -99,7 +101,7 @@ static struct {
};
/*!
- * \brief Get the XPath needed to find a specified CIB element name
+ * \brief Get the relative XPath needed to find a specified CIB element name
*
* \param[in] element_name Name of CIB element
*
@@ -120,6 +122,23 @@ pcmk_cib_xpath_for(const char *element_name)
}
/*!
+ * \internal
+ * \brief Get the absolute XPath needed to find a specified CIB element name
+ *
+ * \param[in] element Name of CIB element
+ *
+ * \return XPath for finding \p element in CIB XML (or \c NULL if unknown)
+ */
+const char *
+pcmk__cib_abs_xpath_for(const char *element)
+{
+ const char *xpath = pcmk_cib_xpath_for(element);
+
+ // XPaths returned by pcmk_cib_xpath_for() are relative (starting with "//")
+ return ((xpath != NULL)? (xpath + 1) : NULL);
+}
+
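/* A small illustration of the relative/absolute distinction. Based on the
 * lookup table earlier in this file, the status section is expected to map to
 * something like "//cib/status" (relative) versus "/cib/status" (absolute).
 * Header locations are assumed.
 */
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/cib.h>
#include <crm/common/cib_internal.h>

static void
example_cib_xpaths(void)
{
    const char *rel_xpath = pcmk_cib_xpath_for(XML_CIB_TAG_STATUS);
    const char *abs_xpath = pcmk__cib_abs_xpath_for(XML_CIB_TAG_STATUS);

    // abs_xpath is rel_xpath minus one leading '/' (suitable for patchset paths)
    crm_trace("relative=%s absolute=%s", rel_xpath, abs_xpath);
}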
+/*!
* \brief Get the parent element name of a given CIB element name
*
* \param[in] element_name Name of CIB element
diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h
index 7faccb6..121d663 100644
--- a/lib/common/crmcommon_private.h
+++ b/lib/common/crmcommon_private.h
@@ -63,7 +63,7 @@ typedef struct xml_doc_private_s {
} while (0)
G_GNUC_INTERNAL
-void pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer,
+void pcmk__xml2text(const xmlNode *data, uint32_t options, GString *buffer,
int depth);
G_GNUC_INTERNAL
@@ -116,12 +116,14 @@ G_GNUC_INTERNAL
void pcmk__log_xmllib_err(void *ctx, const char *fmt, ...)
G_GNUC_PRINTF(2, 3);
-static inline const char *
-pcmk__xml_attr_value(const xmlAttr *attr)
-{
- return ((attr == NULL) || (attr->children == NULL))? NULL
- : (const char *) attr->children->content;
-}
+G_GNUC_INTERNAL
+void pcmk__mark_xml_node_dirty(xmlNode *xml);
+
+G_GNUC_INTERNAL
+bool pcmk__marked_as_deleted(xmlAttrPtr a, void *user_data);
+
+G_GNUC_INTERNAL
+void pcmk__dump_xml_attr(const xmlAttr *attr, GString *buffer);
/*
* IPC
@@ -173,11 +175,11 @@ typedef struct pcmk__ipc_methods_s {
* \brief Check whether an IPC request results in a reply
*
* \param[in,out] api IPC API connection
- * \param[in,out] request IPC request XML
+ * \param[in] request IPC request XML
*
* \return true if request would result in an IPC reply, false otherwise
*/
- bool (*reply_expected)(pcmk_ipc_api_t *api, xmlNode *request);
+ bool (*reply_expected)(pcmk_ipc_api_t *api, const xmlNode *request);
/*!
* \internal
@@ -222,7 +224,7 @@ typedef struct pcmk__ipc_header_s {
} pcmk__ipc_header_t;
G_GNUC_INTERNAL
-int pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request);
+int pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request);
G_GNUC_INTERNAL
void pcmk__call_ipc_callback(pcmk_ipc_api_t *api,
@@ -264,47 +266,6 @@ pcmk__ipc_methods_t *pcmk__schedulerd_api_methods(void);
//! XML has been moved
#define PCMK__XML_PREFIX_MOVED "+~"
-/*!
- * \brief Check the authenticity of the IPC socket peer process
- *
- * If everything goes well, peer's authenticity is verified by the means
- * of comparing against provided referential UID and GID (either satisfies),
- * and the result of this check can be deduced from the return value.
- * As an exception, detected UID of 0 ("root") satisfies arbitrary
- * provided referential daemon's credentials.
- *
- * \param[in] qb_ipc libqb client connection if available
- * \param[in] sock IPC related, connected Unix socket to check peer of
- * \param[in] refuid referential UID to check against
- * \param[in] refgid referential GID to check against
- * \param[out] gotpid to optionally store obtained PID of the peer
- * (not available on FreeBSD, special value of 1
- * used instead, and the caller is required to
- * special case this value respectively)
- * \param[out] gotuid to optionally store obtained UID of the peer
- * \param[out] gotgid to optionally store obtained GID of the peer
- *
- * \return Standard Pacemaker return code
- * ie: 0 if it the connection is authentic
- * pcmk_rc_ipc_unauthorized if the connection is not authentic,
- * standard errors.
- *
- * \note While this function is tolerant on what constitutes authorized
- * IPC daemon process (its effective user matches UID=0 or \p refuid,
- * or at least its group matches \p refgid), either or both (in case
- * of UID=0) mismatches on the expected credentials of such peer
- * process \e shall be investigated at the caller when value of 1
- * gets returned there, since higher-than-expected privileges in
- * respect to the expected/intended credentials possibly violate
- * the least privilege principle and may pose an additional risk
- * (i.e. such accidental inconsistency shall be eventually fixed).
- */
-int pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock,
- uid_t refuid, gid_t refgid,
- pid_t *gotpid, uid_t *gotuid,
- gid_t *gotgid);
-
-
/*
* Output
*/
diff --git a/lib/common/digest.c b/lib/common/digest.c
index 3bf04bf..4de6f97 100644
--- a/lib/common/digest.c
+++ b/lib/common/digest.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -89,7 +89,7 @@ calculate_xml_digest_v1(xmlNode *input, gboolean sort, gboolean ignored)
* \return Newly allocated string containing digest
*/
static char *
-calculate_xml_digest_v2(xmlNode *source, gboolean do_filter)
+calculate_xml_digest_v2(const xmlNode *source, gboolean do_filter)
{
char *digest = NULL;
GString *buffer = g_string_sized_new(1024);
diff --git a/lib/common/io.c b/lib/common/io.c
index 2264e16..35efbe9 100644
--- a/lib/common/io.c
+++ b/lib/common/io.c
@@ -460,11 +460,17 @@ pcmk__file_contents(const char *filename, char **contents)
goto bail;
}
rewind(fp);
- read_len = fread(*contents, 1, length, fp); /* Coverity: False positive */
+
+ read_len = fread(*contents, 1, length, fp);
if (read_len != length) {
free(*contents);
*contents = NULL;
rc = EIO;
+ } else {
+ /* Coverity thinks *contents isn't null-terminated. It doesn't
+ * understand calloc().
+ */
+ (*contents)[length] = '\0';
}
}
diff --git a/lib/common/ipc_attrd.c b/lib/common/ipc_attrd.c
index 7c40aa7..9caaabe 100644
--- a/lib/common/ipc_attrd.c
+++ b/lib/common/ipc_attrd.c
@@ -44,7 +44,7 @@ set_pairs_data(pcmk__attrd_api_reply_t *data, xmlNode *msg_data)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, PCMK__XA_TASK);
@@ -169,32 +169,29 @@ destroy_api(pcmk_ipc_api_t *api)
}
static int
-connect_and_send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
+connect_and_send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
int rc = pcmk_rc_ok;
- int max = 5;
-
- while (max > 0) {
- crm_info("Connecting to cluster... %d retries remaining", max);
- rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
-
- if (rc == pcmk_rc_ok) {
- rc = pcmk__send_ipc_request(api, request);
- break;
- } else if (rc == EAGAIN || rc == EALREADY) {
- sleep(5 - max);
- max--;
- } else {
- crm_err("Could not connect to attrd: %s", pcmk_rc_str(rc));
- break;
- }
+
+ rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not connect to %s: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
}
- return rc;
+ rc = pcmk__send_ipc_request(api, request);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not send request to %s: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
+ }
+
+ return pcmk_rc_ok;
}
static int
-send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
+send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
return pcmk__send_ipc_request(api, request);
}
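
The ipc_attrd.c change above drops the hand-rolled retry loop in favor of pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5) followed by the send. A generic sketch of the retry-with-growing-backoff idea it relies on — the callback type and the 500 ms step are assumptions for illustration, not Pacemaker API:

/* Generic sketch: retry only soft errors (EAGAIN/EALREADY), sleeping a
 * little longer before each new attempt; any other result is final.
 */
#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <time.h>

static int
connect_with_retries(int (*connect_fn)(void *ctx), void *ctx, int attempts)
{
    int rc = EINVAL;

    for (int remaining = attempts - 1; remaining >= 0; --remaining) {
        rc = connect_fn(ctx);
        if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) {
            break;  // Success or a hard error: stop retrying
        }
        long ms = (attempts - remaining) * 500L;    // Growing backoff
        struct timespec delay = { ms / 1000, (ms % 1000) * 1000000L };
        nanosleep(&delay, NULL);
    }
    return rc;
}
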
diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c
index c6d1645..0d38650 100644
--- a/lib/common/ipc_client.c
+++ b/lib/common/ipc_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -31,6 +31,10 @@
#include <crm/common/ipc_internal.h>
#include "crmcommon_private.h"
+static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
+ uid_t refuid, gid_t refgid, pid_t *gotpid,
+ uid_t *gotuid, gid_t *gotgid);
+
/*!
* \brief Create a new object for using Pacemaker daemon IPC
*
@@ -164,7 +168,7 @@ ipc_post_disconnect(gpointer user_data)
{
pcmk_ipc_api_t *api = user_data;
- crm_info("Disconnected from %s IPC API", pcmk_ipc_name(api, true));
+ crm_info("Disconnected from %s", pcmk_ipc_name(api, true));
// Perform any daemon-specific handling needed
if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) {
@@ -389,7 +393,7 @@ dispatch_ipc_source_data(const char *buffer, ssize_t length, gpointer user_data)
* meaning no data is available; all other values indicate errors.
* \todo This does not allow the caller to poll multiple file descriptors at
* once. If there is demand for that, we could add a wrapper for
- * crm_ipc_get_fd(api->ipc), so the caller can call poll() themselves.
+ * pcmk__ipc_fd(api->ipc), so the caller can call poll() themselves.
*/
int
pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms)
@@ -400,7 +404,14 @@ pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms)
if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) {
return EINVAL;
}
- pollfd.fd = crm_ipc_get_fd(api->ipc);
+
+ rc = pcmk__ipc_fd(api->ipc, &(pollfd.fd));
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Could not obtain file descriptor for %s IPC: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
+ }
+
pollfd.events = POLLIN;
rc = poll(&pollfd, 1, timeout_ms);
if (rc < 0) {
@@ -465,54 +476,54 @@ connect_with_main_loop(pcmk_ipc_api_t *api)
static int
connect_without_main_loop(pcmk_ipc_api_t *api)
{
- int rc;
+ int rc = pcmk__connect_generic_ipc(api->ipc);
- if (!crm_ipc_connect(api->ipc)) {
- rc = errno;
+ if (rc != pcmk_rc_ok) {
crm_ipc_close(api->ipc);
- return rc;
+ } else {
+ crm_debug("Connected to %s IPC (without main loop)",
+ pcmk_ipc_name(api, true));
}
- crm_debug("Connected to %s IPC (without main loop)",
- pcmk_ipc_name(api, true));
- return pcmk_rc_ok;
+ return rc;
}
/*!
- * \brief Connect to a Pacemaker daemon via IPC
+ * \internal
+ * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors)
*
* \param[in,out] api IPC API instance
* \param[in] dispatch_type How IPC replies should be dispatched
+ * \param[in] attempts How many times to try (in case of soft error)
*
* \return Standard Pacemaker return code
*/
int
-pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
+pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type,
+ int attempts)
{
- const int n_attempts = 2;
int rc = pcmk_rc_ok;
- if (api == NULL) {
- crm_err("Cannot connect to uninitialized API object");
+ if ((api == NULL) || (attempts < 1)) {
return EINVAL;
}
if (api->ipc == NULL) {
- api->ipc = crm_ipc_new(pcmk_ipc_name(api, false),
- api->ipc_size_max);
+ api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), api->ipc_size_max);
if (api->ipc == NULL) {
- crm_err("Failed to re-create IPC API");
return ENOMEM;
}
}
if (crm_ipc_connected(api->ipc)) {
- crm_trace("Already connected to %s IPC API", pcmk_ipc_name(api, true));
+ crm_trace("Already connected to %s", pcmk_ipc_name(api, true));
return pcmk_rc_ok;
}
api->dispatch_type = dispatch_type;
- for (int i = 0; i < n_attempts; i++) {
+ crm_debug("Attempting connection to %s (up to %d time%s)",
+ pcmk_ipc_name(api, true), attempts, pcmk__plural_s(attempts));
+ for (int remaining = attempts - 1; remaining >= 0; --remaining) {
switch (dispatch_type) {
case pcmk_ipc_dispatch_main:
rc = connect_with_main_loop(api);
@@ -524,17 +535,15 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
break;
}
- if (rc != EAGAIN) {
- break;
+ if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) {
+ break; // Result is final
}
- /* EAGAIN may occur due to interruption by a signal or due to some
- * transient issue. Try one more time to be more resilient.
- */
- if (i < (n_attempts - 1)) {
- crm_trace("Connection to %s IPC API failed with EAGAIN, retrying",
- pcmk_ipc_name(api, true));
- }
+ // Retry after soft error (interrupted by signal, etc.)
+ pcmk__sleep_ms((attempts - remaining) * 500);
+ crm_debug("Re-attempting connection to %s (%d attempt%s remaining)",
+ pcmk_ipc_name(api, true), remaining,
+ pcmk__plural_s(remaining));
}
if (rc != pcmk_rc_ok) {
@@ -551,6 +560,26 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
}
/*!
+ * \brief Connect to a Pacemaker daemon via IPC
+ *
+ * \param[in,out] api IPC API instance
+ * \param[in] dispatch_type How IPC replies should be dispatched
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
+{
+ int rc = pcmk__connect_ipc(api, dispatch_type, 2);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Connection to %s failed: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ }
+ return rc;
+}
+
+/*!
* \brief Disconnect an IPC API instance
*
* \param[in,out] api IPC API connection
@@ -628,7 +657,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
* \brief Send an XML request across an IPC API connection
*
* \param[in,out] api IPC API connection
- * \param[in,out] request XML request to send
+ * \param[in] request XML request to send
*
* \return Standard Pacemaker return code
*
@@ -636,7 +665,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
* requests, because it handles different dispatch types appropriately.
*/
int
-pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request)
+pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
int rc;
xmlNode *reply = NULL;
@@ -855,6 +884,77 @@ crm_ipc_new(const char *name, size_t max_size)
}
/*!
+ * \internal
+ * \brief Connect a generic (not daemon-specific) IPC object
+ *
+ * \param[in,out] ipc Generic IPC object to connect
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__connect_generic_ipc(crm_ipc_t *ipc)
+{
+ uid_t cl_uid = 0;
+ gid_t cl_gid = 0;
+ pid_t found_pid = 0;
+ uid_t found_uid = 0;
+ gid_t found_gid = 0;
+ int rc = pcmk_rc_ok;
+
+ if (ipc == NULL) {
+ return EINVAL;
+ }
+
+ ipc->need_reply = FALSE;
+ ipc->ipc = qb_ipcc_connect(ipc->server_name, ipc->buf_size);
+ if (ipc->ipc == NULL) {
+ return errno;
+ }
+
+ rc = qb_ipcc_fd_get(ipc->ipc, &ipc->pfd.fd);
+ if (rc < 0) { // -errno
+ crm_ipc_close(ipc);
+ return -rc;
+ }
+
+ rc = pcmk_daemon_user(&cl_uid, &cl_gid);
+ rc = pcmk_legacy2rc(rc);
+ if (rc != pcmk_rc_ok) {
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
+ rc = is_ipc_provider_expected(ipc->ipc, ipc->pfd.fd, cl_uid, cl_gid,
+ &found_pid, &found_uid, &found_gid);
+ if (rc != pcmk_rc_ok) {
+ if (rc == pcmk_rc_ipc_unauthorized) {
+ crm_info("%s IPC provider authentication failed: process %lld has "
+ "uid %lld (expected %lld) and gid %lld (expected %lld)",
+ ipc->server_name,
+ (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
+ (long long) found_uid, (long long) cl_uid,
+ (long long) found_gid, (long long) cl_gid);
+ }
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
+ ipc->max_buf_size = qb_ipcc_get_buffer_size(ipc->ipc);
+ if (ipc->max_buf_size > ipc->buf_size) {
+ free(ipc->buffer);
+ ipc->buffer = calloc(ipc->max_buf_size, sizeof(char));
+ if (ipc->buffer == NULL) {
+ rc = errno;
+ crm_ipc_close(ipc);
+ return rc;
+ }
+ ipc->buf_size = ipc->max_buf_size;
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
* \brief Establish an IPC connection to a Pacemaker component
*
* \param[in,out] client Connection instance obtained from crm_ipc_new()
@@ -866,76 +966,26 @@ crm_ipc_new(const char *name, size_t max_size)
bool
crm_ipc_connect(crm_ipc_t *client)
{
- uid_t cl_uid = 0;
- gid_t cl_gid = 0;
- pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
- int rv;
+ int rc = pcmk__connect_generic_ipc(client);
- if (client == NULL) {
- errno = EINVAL;
- return false;
+ if (rc == pcmk_rc_ok) {
+ return true;
}
-
- client->need_reply = FALSE;
- client->ipc = qb_ipcc_connect(client->server_name, client->buf_size);
-
- if (client->ipc == NULL) {
+ if ((client != NULL) && (client->ipc == NULL)) {
+ errno = (rc > 0)? rc : ENOTCONN;
crm_debug("Could not establish %s IPC connection: %s (%d)",
client->server_name, pcmk_rc_str(errno), errno);
- return false;
- }
-
- client->pfd.fd = crm_ipc_get_fd(client);
- if (client->pfd.fd < 0) {
- rv = errno;
- /* message already omitted */
- crm_ipc_close(client);
- errno = rv;
- return false;
- }
-
- rv = pcmk_daemon_user(&cl_uid, &cl_gid);
- if (rv < 0) {
- /* message already omitted */
- crm_ipc_close(client);
- errno = -rv;
- return false;
- }
-
- if ((rv = pcmk__crm_ipc_is_authentic_process(client->ipc, client->pfd.fd, cl_uid, cl_gid,
- &found_pid, &found_uid,
- &found_gid)) == pcmk_rc_ipc_unauthorized) {
- crm_err("%s IPC provider authentication failed: process %lld has "
- "uid %lld (expected %lld) and gid %lld (expected %lld)",
- client->server_name,
- (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
- (long long) found_uid, (long long) cl_uid,
- (long long) found_gid, (long long) cl_gid);
- crm_ipc_close(client);
+ } else if (rc == pcmk_rc_ipc_unauthorized) {
+ crm_err("%s IPC provider authentication failed",
+ (client == NULL)? "Pacemaker" : client->server_name);
errno = ECONNABORTED;
- return false;
-
- } else if (rv != pcmk_rc_ok) {
- crm_perror(LOG_ERR, "Could not verify authenticity of %s IPC provider",
- client->server_name);
- crm_ipc_close(client);
- if (rv > 0) {
- errno = rv;
- } else {
- errno = ENOTCONN;
- }
- return false;
- }
-
- qb_ipcc_context_set(client->ipc, client);
-
- client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc);
- if (client->max_buf_size > client->buf_size) {
- free(client->buffer);
- client->buffer = calloc(1, client->max_buf_size);
- client->buf_size = client->max_buf_size;
+ } else {
+ crm_perror(LOG_ERR,
+ "Could not verify authenticity of %s IPC provider",
+ (client == NULL)? "Pacemaker" : client->server_name);
+ errno = ENOTCONN;
}
- return true;
+ return false;
}
void
@@ -977,18 +1027,40 @@ crm_ipc_destroy(crm_ipc_t * client)
}
}
+/*!
+ * \internal
+ * \brief Get the file descriptor for a generic IPC object
+ *
+ * \param[in,out] ipc Generic IPC object to get file descriptor for
+ * \param[out] fd Where to store file descriptor
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__ipc_fd(crm_ipc_t *ipc, int *fd)
+{
+ if ((ipc == NULL) || (fd == NULL)) {
+ return EINVAL;
+ }
+ if ((ipc->ipc == NULL) || (ipc->pfd.fd < 0)) {
+ return ENOTCONN;
+ }
+ *fd = ipc->pfd.fd;
+ return pcmk_rc_ok;
+}
+
int
crm_ipc_get_fd(crm_ipc_t * client)
{
- int fd = 0;
+ int fd = -1;
- if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) {
- return fd;
+ if (pcmk__ipc_fd(client, &fd) != pcmk_rc_ok) {
+ crm_err("Could not obtain file descriptor for %s IPC",
+ ((client == NULL)? "unspecified" : client->server_name));
+ errno = EINVAL;
+ return -EINVAL;
}
- errno = EINVAL;
- crm_perror(LOG_ERR, "Could not obtain file descriptor for %s IPC",
- (client? client->server_name : "unspecified"));
- return -errno;
+ return fd;
}
bool
@@ -1057,12 +1129,13 @@ crm_ipc_decompress(crm_ipc_t * client)
rc = BZ2_bzBuffToBuffDecompress(uncompressed + sizeof(pcmk__ipc_header_t), &size_u,
client->buffer + sizeof(pcmk__ipc_header_t), header->size_compressed, 1, 0);
+ rc = pcmk__bzlib2rc(rc);
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
- return EILSEQ;
+ return rc;
}
/*
@@ -1221,7 +1294,7 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout,
* \brief Send an IPC XML message
*
* \param[in,out] client Connection to IPC server
- * \param[in,out] message XML message to send
+ * \param[in] message XML message to send
* \param[in] flags Bitmask of crm_ipc_flags
* \param[in] ms_timeout Give up if not sent within this much time
* (5 seconds if 0, or no timeout if negative)
@@ -1231,8 +1304,8 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout,
* if reply was needed, otherwise number of bytes sent
*/
int
-crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout,
- xmlNode ** reply)
+crm_ipc_send(crm_ipc_t *client, const xmlNode *message,
+ enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply)
{
int rc = 0;
ssize_t qb_rc = 0;
@@ -1385,89 +1458,129 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in
return rc;
}
-int
-pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid,
- pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
+/*!
+ * \brief Ensure an IPC provider has expected user or group
+ *
+ * \param[in] qb_ipc libqb client connection if available
+ * \param[in] sock Connected Unix socket for IPC
+ * \param[in] refuid Expected user ID
+ * \param[in] refgid Expected group ID
+ * \param[out] gotpid If not NULL, where to store provider's actual process ID
+ * (or 1 on platforms where ID is not available)
+ * \param[out] gotuid If not NULL, where to store provider's actual user ID
+ * \param[out] gotgid If not NULL, where to store provider's actual group ID
+ *
+ * \return Standard Pacemaker return code
+ * \note An actual user ID of 0 (root) will always be considered authorized,
+ * regardless of the expected values provided. The caller can use the
+ * output arguments to be stricter than this function.
+ */
+static int
+is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
+ uid_t refuid, gid_t refgid,
+ pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
{
- int ret = 0;
- pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
-#if defined(HAVE_UCRED)
- struct ucred ucred;
- socklen_t ucred_len = sizeof(ucred);
-#endif
+ int rc = EOPNOTSUPP;
+ pid_t found_pid = 0;
+ uid_t found_uid = 0;
+ gid_t found_gid = 0;
#ifdef HAVE_QB_IPCC_AUTH_GET
- if (qb_ipc && !qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid)) {
- goto do_checks;
+ if (qb_ipc != NULL) {
+ rc = qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid);
+ rc = -rc; // libqb returns 0 or -errno
+ if (rc == pcmk_rc_ok) {
+ goto found;
+ }
}
#endif
-#if defined(HAVE_UCRED)
- if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
- &ucred, &ucred_len)
- && ucred_len == sizeof(ucred)) {
- found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid;
+#ifdef HAVE_UCRED
+ {
+ struct ucred ucred;
+ socklen_t ucred_len = sizeof(ucred);
-#elif defined(HAVE_SOCKPEERCRED)
- struct sockpeercred sockpeercred;
- socklen_t sockpeercred_len = sizeof(sockpeercred);
-
- if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
- &sockpeercred, &sockpeercred_len)
- && sockpeercred_len == sizeof(sockpeercred_len)) {
- found_pid = sockpeercred.pid;
- found_uid = sockpeercred.uid; found_gid = sockpeercred.gid;
+ if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) < 0) {
+ rc = errno;
+ } else if (ucred_len != sizeof(ucred)) {
+ rc = EOPNOTSUPP;
+ } else {
+ found_pid = ucred.pid;
+ found_uid = ucred.uid;
+ found_gid = ucred.gid;
+ goto found;
+ }
+ }
+#endif
-#elif defined(HAVE_GETPEEREID)
- if (!getpeereid(sock, &found_uid, &found_gid)) {
- found_pid = PCMK__SPECIAL_PID; /* cannot obtain PID (FreeBSD) */
+#ifdef HAVE_SOCKPEERCRED
+ {
+ struct sockpeercred sockpeercred;
+ socklen_t sockpeercred_len = sizeof(sockpeercred);
-#elif defined(HAVE_GETPEERUCRED)
- ucred_t *ucred;
- if (!getpeerucred(sock, &ucred)) {
- errno = 0;
- found_pid = ucred_getpid(ucred);
- found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred);
- ret = -errno;
- ucred_free(ucred);
- if (ret) {
- return (ret < 0) ? ret : -pcmk_err_generic;
+ if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
+ &sockpeercred, &sockpeercred_len) < 0) {
+ rc = errno;
+ } else if (sockpeercred_len != sizeof(sockpeercred)) {
+ rc = EOPNOTSUPP;
+ } else {
+ found_pid = sockpeercred.pid;
+ found_uid = sockpeercred.uid;
+ found_gid = sockpeercred.gid;
+ goto found;
}
-
-#else
-# error "No way to authenticate a Unix socket peer"
- errno = 0;
- if (0) {
+ }
#endif
-#ifdef HAVE_QB_IPCC_AUTH_GET
- do_checks:
+
+#ifdef HAVE_GETPEEREID // For example, FreeBSD
+ if (getpeereid(sock, &found_uid, &found_gid) < 0) {
+ rc = errno;
+ } else {
+ found_pid = PCMK__SPECIAL_PID;
+ goto found;
+ }
#endif
- if (gotpid != NULL) {
- *gotpid = found_pid;
- }
- if (gotuid != NULL) {
- *gotuid = found_uid;
- }
- if (gotgid != NULL) {
- *gotgid = found_gid;
- }
- if (found_uid == 0 || found_uid == refuid || found_gid == refgid) {
- ret = 0;
+
+#ifdef HAVE_GETPEERUCRED
+ {
+ ucred_t *ucred = NULL;
+
+ if (getpeerucred(sock, &ucred) < 0) {
+ rc = errno;
} else {
- ret = pcmk_rc_ipc_unauthorized;
+ found_pid = ucred_getpid(ucred);
+ found_uid = ucred_geteuid(ucred);
+ found_gid = ucred_getegid(ucred);
+ ucred_free(ucred);
+ goto found;
}
- } else {
- ret = (errno > 0) ? errno : pcmk_rc_error;
}
- return ret;
+#endif
+
+ return rc; // If we get here, nothing succeeded
+
+found:
+ if (gotpid != NULL) {
+ *gotpid = found_pid;
+ }
+ if (gotuid != NULL) {
+ *gotuid = found_uid;
+ }
+ if (gotgid != NULL) {
+ *gotgid = found_gid;
+ }
+ if ((found_uid != 0) && (found_uid != refuid) && (found_gid != refgid)) {
+ return pcmk_rc_ipc_unauthorized;
+ }
+ return pcmk_rc_ok;
}
int
crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid,
pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
{
- int ret = pcmk__crm_ipc_is_authentic_process(NULL, sock, refuid, refgid,
- gotpid, gotuid, gotgid);
+ int ret = is_ipc_provider_expected(NULL, sock, refuid, refgid,
+ gotpid, gotuid, gotgid);
/* The old function had some very odd return codes*/
if (ret == 0) {
@@ -1528,8 +1641,8 @@ pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid,
goto bail;
}
- auth_rc = pcmk__crm_ipc_is_authentic_process(c, fd, refuid, refgid, &found_pid,
- &found_uid, &found_gid);
+ auth_rc = is_ipc_provider_expected(c, fd, refuid, refgid,
+ &found_pid, &found_uid, &found_gid);
if (auth_rc == pcmk_rc_ipc_unauthorized) {
crm_err("Daemon (IPC %s) effectively blocked with unauthorized"
" process %lld (uid: %lld, gid: %lld)",
diff --git a/lib/common/ipc_common.c b/lib/common/ipc_common.c
index d0c0636..a48b0e9 100644
--- a/lib/common/ipc_common.c
+++ b/lib/common/ipc_common.c
@@ -35,7 +35,7 @@ pcmk__ipc_buffer_size(unsigned int max)
if (global_max == 0) {
long long global_ll;
- if ((pcmk__scan_ll(getenv("PCMK_ipc_buffer"), &global_ll,
+ if ((pcmk__scan_ll(pcmk__env_option(PCMK__ENV_IPC_BUFFER), &global_ll,
0LL) != pcmk_rc_ok)
|| (global_ll <= 0)) {
global_max = MAX_MSG_SIZE; // Default for unset or invalid
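
The hunk above reads the IPC buffer size through pcmk__env_option() and still falls back to MAX_MSG_SIZE when the value is unset, unparsable, or non-positive. A generic sketch of that parse-and-validate step using strtoll (names are illustrative):

/* Sketch: parse a numeric option from the environment, keeping a default
 * when the value is missing, malformed, or not a positive number.
 */
#include <stdlib.h>
#include <errno.h>

static long long
env_option_ll(const char *name, long long fallback)
{
    const char *value = getenv(name);
    char *end = NULL;
    long long parsed = 0;

    if ((value == NULL) || (*value == '\0')) {
        return fallback;
    }
    errno = 0;
    parsed = strtoll(value, &end, 10);
    if ((errno != 0) || (end == value) || (*end != '\0') || (parsed <= 0)) {
        return fallback;    // Reject garbage, overflow, and non-positive values
    }
    return parsed;
}
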
diff --git a/lib/common/ipc_controld.c b/lib/common/ipc_controld.c
index 9303afd..8e2016e 100644
--- a/lib/common/ipc_controld.c
+++ b/lib/common/ipc_controld.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -135,7 +135,7 @@ set_node_info_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data)
data->data.node_info.uuid = crm_element_value(msg_data, XML_ATTR_ID);
data->data.node_info.uname = crm_element_value(msg_data, XML_ATTR_UNAME);
- data->data.node_info.state = crm_element_value(msg_data, XML_NODE_IS_PEER);
+ data->data.node_info.state = crm_element_value(msg_data, PCMK__XA_CRMD);
}
static void
@@ -169,26 +169,24 @@ set_nodes_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data)
node_info->id = id_ll;
}
node_info->uname = crm_element_value(node, XML_ATTR_UNAME);
- node_info->state = crm_element_value(node, XML_NODE_IN_CLUSTER);
+ node_info->state = crm_element_value(node, PCMK__XA_IN_CCM);
data->data.nodes = g_list_prepend(data->data.nodes, node_info);
}
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
- const char *command = crm_element_value(request, F_CRM_TASK);
-
- if (command == NULL) {
- return false;
- }
-
- // We only need to handle commands that functions in this file can send
- return !strcmp(command, CRM_OP_REPROBE)
- || !strcmp(command, CRM_OP_NODE_INFO)
- || !strcmp(command, CRM_OP_PING)
- || !strcmp(command, CRM_OP_LRM_FAIL)
- || !strcmp(command, CRM_OP_LRM_DELETE);
+ // We only need to handle commands that API functions can send
+ return pcmk__str_any_of(crm_element_value(request, F_CRM_TASK),
+ PCMK__CONTROLD_CMD_NODES,
+ CRM_OP_LRM_DELETE,
+ CRM_OP_LRM_FAIL,
+ CRM_OP_NODE_INFO,
+ CRM_OP_PING,
+ CRM_OP_REPROBE,
+ CRM_OP_RM_NODE_CACHE,
+ NULL);
}
static bool
@@ -202,22 +200,12 @@ dispatch(pcmk_ipc_api_t *api, xmlNode *reply)
pcmk_controld_reply_unknown, NULL, NULL,
};
- /* If we got an ACK, return true so the caller knows to expect more responses
- * from the IPC server. We do this before decrementing replies_expected because
- * ACKs are not going to be included in that value.
- *
- * Note that we cannot do the same kind of status checking here that we do in
- * ipc_pacemakerd.c. The ACK message we receive does not necessarily contain
- * a status attribute. That is, we may receive this:
- *
- * <ack function="crmd_remote_proxy_cb" line="556"/>
- *
- * Instead of this:
- *
- * <ack function="dispatch_controller_ipc" line="391" status="112"/>
- */
- if (pcmk__str_eq(crm_element_name(reply), "ack", pcmk__str_none)) {
- return true; // More replies needed
+ if (pcmk__xe_is(reply, "ack")) {
+ /* ACKs are trivial responses that do not count toward expected replies,
+ * and do not have all the fields that validation requires, so skip that
+ * processing.
+ */
+ return private->replies_expected > 0;
}
if (private->replies_expected > 0) {
@@ -341,21 +329,18 @@ create_controller_request(const pcmk_ipc_api_t *api, const char *op,
// \return Standard Pacemaker return code
static int
-send_controller_request(pcmk_ipc_api_t *api, xmlNode *request,
+send_controller_request(pcmk_ipc_api_t *api, const xmlNode *request,
bool reply_is_expected)
{
- int rc;
-
if (crm_element_value(request, XML_ATTR_REFERENCE) == NULL) {
return EINVAL;
}
- rc = pcmk__send_ipc_request(api, request);
- if ((rc == pcmk_rc_ok) && reply_is_expected) {
+ if (reply_is_expected) {
struct controld_api_private_s *private = api->api_data;
private->replies_expected++;
}
- return rc;
+ return pcmk__send_ipc_request(api, request);
}
static xmlNode *
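
Both this file and ipc_server.c below replace strcmp() chains with pcmk__str_any_of(), a NULL-terminated varargs membership test. A simplified sketch of that idiom (not the actual Pacemaker implementation):

/* Sketch: return true if s matches any of the candidate strings, where the
 * candidate list is terminated by a NULL sentinel.
 */
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

static bool
str_any_of(const char *s, ...)
{
    va_list ap;
    bool found = false;

    if (s == NULL) {
        return false;
    }
    va_start(ap, s);
    for (const char *candidate = va_arg(ap, const char *);
         candidate != NULL; candidate = va_arg(ap, const char *)) {
        if (strcmp(s, candidate) == 0) {
            found = true;
            break;
        }
    }
    va_end(ap);
    return found;
}

/* Usage: str_any_of(command, CRM_OP_PING, CRM_OP_REPROBE, NULL); */
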
diff --git a/lib/common/ipc_pacemakerd.c b/lib/common/ipc_pacemakerd.c
index 91a3143..2f03709 100644
--- a/lib/common/ipc_pacemakerd.c
+++ b/lib/common/ipc_pacemakerd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -178,7 +178,7 @@ post_disconnect(pcmk_ipc_api_t *api)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, F_CRM_TASK);
diff --git a/lib/common/ipc_schedulerd.c b/lib/common/ipc_schedulerd.c
index c1b81a4..cf788e5 100644
--- a/lib/common/ipc_schedulerd.c
+++ b/lib/common/ipc_schedulerd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -62,7 +62,7 @@ post_connect(pcmk_ipc_api_t *api)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, F_CRM_TASK);
diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c
index 60f20fb..5cd7e70 100644
--- a/lib/common/ipc_server.c
+++ b/lib/common/ipc_server.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -421,9 +421,11 @@ pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id,
rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0);
text = uncompressed;
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
return NULL;
}
@@ -568,16 +570,16 @@ crm_ipcs_flush_events(pcmk__client_t *c)
* \internal
* \brief Create an I/O vector for sending an IPC XML message
*
- * \param[in] request Identifier for libqb response header
- * \param[in,out] message XML message to send
- * \param[in] max_send_size If 0, default IPC buffer size is used
- * \param[out] result Where to store prepared I/O vector
- * \param[out] bytes Size of prepared data in bytes
+ * \param[in] request Identifier for libqb response header
+ * \param[in] message XML message to send
+ * \param[in] max_send_size If 0, default IPC buffer size is used
+ * \param[out] result Where to store prepared I/O vector
+ * \param[out] bytes Size of prepared data in bytes
*
* \return Standard Pacemaker return code
*/
int
-pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message,
+pcmk__ipc_prepare_iov(uint32_t request, const xmlNode *message,
uint32_t max_send_size, struct iovec **result,
ssize_t *bytes)
{
@@ -741,7 +743,7 @@ pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags)
}
int
-pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message,
+pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, const xmlNode *message,
uint32_t flags)
{
struct iovec *iov = NULL;
@@ -819,6 +821,7 @@ pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c,
if (ack != NULL) {
crm_trace("Ack'ing IPC message from client %s as <%s status=%d>",
pcmk__client_name(c), tag, status);
+ crm_log_xml_trace(ack, "sent-ack");
c->request_id = 0;
rc = pcmk__ipc_send_xml(c, request, ack, flags);
free_xml(ack);
@@ -995,14 +998,17 @@ pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb)
bool
crm_is_daemon_name(const char *name)
{
- name = pcmk__message_name(name);
- return (!strcmp(name, CRM_SYSTEM_CRMD)
- || !strcmp(name, CRM_SYSTEM_STONITHD)
- || !strcmp(name, "stonith-ng")
- || !strcmp(name, "attrd")
- || !strcmp(name, CRM_SYSTEM_CIB)
- || !strcmp(name, CRM_SYSTEM_MCP)
- || !strcmp(name, CRM_SYSTEM_DC)
- || !strcmp(name, CRM_SYSTEM_TENGINE)
- || !strcmp(name, CRM_SYSTEM_LRMD));
+ return pcmk__str_any_of(pcmk__message_name(name),
+ "attrd",
+ CRM_SYSTEM_CIB,
+ CRM_SYSTEM_CRMD,
+ CRM_SYSTEM_DC,
+ CRM_SYSTEM_LRMD,
+ CRM_SYSTEM_MCP,
+ CRM_SYSTEM_PENGINE,
+ CRM_SYSTEM_STONITHD,
+ CRM_SYSTEM_TENGINE,
+ "pacemaker-remoted",
+ "stonith-ng",
+ NULL);
}
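
The decompression hunks here and in ipc_client.c now run the raw bzlib status through pcmk__bzlib2rc() before logging. A self-contained sketch of buffer-to-buffer decompression with libbz2 plus an errno-style mapping — the specific errno values chosen below are illustrative, not necessarily what pcmk__bzlib2rc() returns:

/* Sketch: decompress src into dest and translate the bzlib status code
 * into an errno-style result.
 */
#include <bzlib.h>
#include <errno.h>

static int
decompress_buffer(char *dest, unsigned int *dest_len,
                  char *src, unsigned int src_len)
{
    int bz = BZ2_bzBuffToBuffDecompress(dest, dest_len, src, src_len,
                                        1 /* small */, 0 /* verbosity */);

    switch (bz) {
        case BZ_OK:              return 0;
        case BZ_MEM_ERROR:       return ENOMEM;
        case BZ_OUTBUFF_FULL:    return ENOSPC;  // Destination too small
        case BZ_DATA_ERROR:
        case BZ_DATA_ERROR_MAGIC:
        case BZ_UNEXPECTED_EOF:  return EILSEQ;  // Corrupt or truncated input
        default:                 return EPROTO;
    }
}
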
diff --git a/lib/common/iso8601.c b/lib/common/iso8601.c
index 3e000e1..9de018f 100644
--- a/lib/common/iso8601.c
+++ b/lib/common/iso8601.c
@@ -1930,9 +1930,10 @@ pcmk__readable_interval(guint interval_ms)
#define MS_IN_H (MS_IN_M * 60)
#define MS_IN_D (MS_IN_H * 24)
#define MAXSTR sizeof("..d..h..m..s...ms")
- static char str[MAXSTR] = { '\0', };
+ static char str[MAXSTR];
int offset = 0;
+ str[0] = '\0';
if (interval_ms > MS_IN_D) {
offset += snprintf(str + offset, MAXSTR - offset, "%ud",
interval_ms / MS_IN_D);
diff --git a/lib/common/logging.c b/lib/common/logging.c
index dded873..7768c35 100644
--- a/lib/common/logging.c
+++ b/lib/common/logging.c
@@ -51,6 +51,11 @@ static unsigned int crm_log_priority = LOG_NOTICE;
static GLogFunc glib_log_default = NULL;
static pcmk__output_t *logger_out = NULL;
+pcmk__config_error_func pcmk__config_error_handler = NULL;
+pcmk__config_warning_func pcmk__config_warning_handler = NULL;
+void *pcmk__config_error_context = NULL;
+void *pcmk__config_warning_context = NULL;
+
static gboolean crm_tracing_enabled(void);
static void
@@ -237,7 +242,7 @@ chown_logfile(const char *filename, int logfd)
static void
chmod_logfile(const char *filename, int logfd)
{
- const char *modestr = getenv("PCMK_logfile_mode");
+ const char *modestr = pcmk__env_option(PCMK__ENV_LOGFILE_MODE);
mode_t filemode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
if (modestr != NULL) {
@@ -297,7 +302,7 @@ setenv_logfile(const char *filename)
{
// Some resource agents will log only if environment variable is set
if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) {
- pcmk__set_env_option(PCMK__ENV_LOGFILE, filename);
+ pcmk__set_env_option(PCMK__ENV_LOGFILE, filename, true);
}
}
@@ -609,6 +614,20 @@ crm_log_filter_source(int source, const char *trace_files, const char *trace_fns
}
}
+#ifndef HAVE_STRCHRNUL
+/* strchrnul() is a GNU extension. If not present, use our own definition.
+ * The GNU version returns char*, but we only need it to be const char*.
+ */
+static const char *
+strchrnul(const char *s, int c)
+{
+ while ((*s != c) && (*s != '\0')) {
+ ++s;
+ }
+ return s;
+}
+#endif
+
static void
crm_log_filter(struct qb_log_callsite *cs)
{
@@ -622,11 +641,11 @@ crm_log_filter(struct qb_log_callsite *cs)
if (need_init) {
need_init = 0;
- trace_fns = getenv("PCMK_trace_functions");
- trace_fmts = getenv("PCMK_trace_formats");
- trace_tags = getenv("PCMK_trace_tags");
- trace_files = getenv("PCMK_trace_files");
- trace_blackbox = getenv("PCMK_trace_blackbox");
+ trace_fns = pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS);
+ trace_fmts = pcmk__env_option(PCMK__ENV_TRACE_FORMATS);
+ trace_tags = pcmk__env_option(PCMK__ENV_TRACE_TAGS);
+ trace_files = pcmk__env_option(PCMK__ENV_TRACE_FILES);
+ trace_blackbox = pcmk__env_option(PCMK__ENV_TRACE_BLACKBOX);
if (trace_tags != NULL) {
uint32_t tag;
@@ -695,8 +714,10 @@ crm_update_callsites(void)
log = FALSE;
crm_debug
("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s",
- crm_log_level, getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"),
- getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags"));
+ crm_log_level, pcmk__env_option(PCMK__ENV_TRACE_FILES),
+ pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS),
+ pcmk__env_option(PCMK__ENV_TRACE_FORMATS),
+ pcmk__env_option(PCMK__ENV_TRACE_TAGS));
}
qb_log_filter_fn_set(crm_log_filter);
}
@@ -704,13 +725,11 @@ crm_update_callsites(void)
static gboolean
crm_tracing_enabled(void)
{
- if (crm_log_level == LOG_TRACE) {
- return TRUE;
- } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions")
- || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) {
- return TRUE;
- }
- return FALSE;
+ return (crm_log_level == LOG_TRACE)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FILES) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FORMATS) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_TAGS) != NULL);
}
static int
@@ -784,7 +803,8 @@ set_identity(const char *entity, int argc, char *const *argv)
CRM_ASSERT(crm_system_name != NULL);
- setenv("PCMK_service", crm_system_name, 1);
+ // Used by fencing.py.py (in fence-agents)
+ pcmk__set_env_option(PCMK__ENV_SERVICE, crm_system_name, false);
}
void
@@ -897,7 +917,7 @@ crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_std
} else {
facility = PCMK__VALUE_NONE;
}
- pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility);
+ pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility, true);
}
if (pcmk__str_eq(facility, PCMK__VALUE_NONE, pcmk__str_casei)) {
@@ -1127,16 +1147,21 @@ pcmk__cli_init_logging(const char *name, unsigned int verbosity)
/*!
* \brief Log XML line-by-line in a formatted fashion
*
- * \param[in] level Priority at which to log the messages
- * \param[in] text Prefix for each line
- * \param[in] xml XML to log
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] text Prefix for each line
+ * \param[in] xml XML to log
*
* \note This does nothing when \p level is \p LOG_STDOUT.
* \note Do not call this function directly. It should be called only from the
* \p do_crm_log_xml() macro.
*/
void
-pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
+pcmk_log_xml_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const char *text, const xmlNode *xml)
{
if (xml == NULL) {
do_crm_log(level, "%s%sNo data to dump as XML",
@@ -1148,12 +1173,76 @@ pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
}
pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
pcmk__xml_show(logger_out, text, xml, 1,
pcmk__xml_fmt_pretty
|pcmk__xml_fmt_open
|pcmk__xml_fmt_children
|pcmk__xml_fmt_close);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Log XML changes line-by-line in a formatted fashion
+ *
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] xml XML whose changes to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+void
+pcmk__log_xml_changes_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const xmlNode *xml)
+{
+ if (xml == NULL) {
+ do_crm_log(level, "No XML to dump");
+ return;
+ }
+
+ if (logger_out == NULL) {
+ CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return);
}
+ pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
+ pcmk__xml_show_changes(logger_out, xml);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
+}
+
+/*!
+ * \internal
+ * \brief Log an XML patchset line-by-line in a formatted fashion
+ *
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] patchset XML patchset to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+void
+pcmk__log_xml_patchset_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const xmlNode *patchset)
+{
+ if (patchset == NULL) {
+ do_crm_log(level, "No patchset to dump");
+ return;
+ }
+
+ if (logger_out == NULL) {
+ CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return);
+ }
+ pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
+ logger_out->message(logger_out, "xml-patchset", patchset);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
}
/*!
@@ -1188,5 +1277,23 @@ crm_add_logfile(const char *filename)
return pcmk__add_logfile(filename) == pcmk_rc_ok;
}
+void
+pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
+{
+ pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, level, text, xml);
+}
+
// LCOV_EXCL_STOP
// End deprecated API
+
+void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context)
+{
+ pcmk__config_error_handler = error_handler;
+ pcmk__config_error_context = error_context;
+}
+
+void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context)
+{
+ pcmk__config_warning_handler = warning_handler;
+ pcmk__config_warning_context = warning_context;
+}
\ No newline at end of file
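
The new pcmk__set_config_error_handler()/pcmk__set_config_warning_handler() entry points above are plain callback-plus-context registration. A generic sketch of that pattern (the handler type and demo reporter are illustrative):

/* Sketch: store a handler and an opaque context pointer, and fall back to
 * stderr when no handler has been registered.
 */
#include <stdio.h>

typedef void (*config_msg_func)(void *context, const char *msg);

static config_msg_func error_handler = NULL;
static void *error_context = NULL;

static void
set_config_error_handler(config_msg_func handler, void *context)
{
    error_handler = handler;
    error_context = context;
}

static void
report_config_error(const char *msg)
{
    if (error_handler != NULL) {
        error_handler(error_context, msg);  // Hand the message to the hook
    } else {
        fprintf(stderr, "config error: %s\n", msg);
    }
}
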
diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c
index 3124e43..f971713 100644
--- a/lib/common/mainloop.c
+++ b/lib/common/mainloop.c
@@ -393,16 +393,6 @@ mainloop_add_signal(int sig, void (*dispatch) (int sig))
mainloop_destroy_signal_entry(sig);
return FALSE;
}
-#if 0
- /* If we want signals to interrupt mainloop's poll(), instead of waiting for
- * the timeout, then we should call siginterrupt() below
- *
- * For now, just enforce a low timeout
- */
- if (siginterrupt(sig, 1) < 0) {
- crm_perror(LOG_INFO, "Could not enable system call interruptions for signal %d", sig);
- }
-#endif
return TRUE;
}
@@ -624,7 +614,7 @@ struct qb_ipcs_poll_handlers gio_poll_funcs = {
static enum qb_ipc_type
pick_ipc_type(enum qb_ipc_type requested)
{
- const char *env = getenv("PCMK_ipc_type");
+ const char *env = pcmk__env_option(PCMK__ENV_IPC_TYPE);
if (env && strcmp("shared-mem", env) == 0) {
return QB_IPC_SHM;
@@ -668,7 +658,8 @@ mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type,
server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks);
if (server == NULL) {
- crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc);
+ crm_err("Could not create %s IPC server: %s (%d)",
+ name, pcmk_rc_str(errno), errno);
return NULL;
}
@@ -874,21 +865,34 @@ pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata,
const struct ipc_client_callbacks *callbacks,
mainloop_io_t **source)
{
+ int rc = pcmk_rc_ok;
+ int fd = -1;
+ const char *ipc_name = NULL;
+
CRM_CHECK((ipc != NULL) && (callbacks != NULL), return EINVAL);
- if (!crm_ipc_connect(ipc)) {
- int rc = errno;
- crm_debug("Connection to %s failed: %d", crm_ipc_name(ipc), errno);
+ ipc_name = pcmk__s(crm_ipc_name(ipc), "Pacemaker");
+ rc = pcmk__connect_generic_ipc(ipc);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Connection to %s failed: %s", ipc_name, pcmk_rc_str(rc));
return rc;
}
- *source = mainloop_add_fd(crm_ipc_name(ipc), priority, crm_ipc_get_fd(ipc),
- userdata, NULL);
- if (*source == NULL) {
- int rc = errno;
+ rc = pcmk__ipc_fd(ipc, &fd);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Could not obtain file descriptor for %s IPC: %s",
+ ipc_name, pcmk_rc_str(rc));
crm_ipc_close(ipc);
return rc;
}
+
+ *source = mainloop_add_fd(ipc_name, priority, fd, userdata, NULL);
+ if (*source == NULL) {
+ rc = errno;
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
(*source)->ipc = ipc;
(*source)->destroy_fn = callbacks->destroy;
(*source)->dispatch_fn_ipc = callbacks->dispatch;
diff --git a/lib/common/mock.c b/lib/common/mock.c
index 2bd8334..6f837ad 100644
--- a/lib/common/mock.c
+++ b/lib/common/mock.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -7,6 +7,8 @@
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
+#include <crm_internal.h>
+
#include <errno.h>
#include <pwd.h>
#include <stdarg.h>
@@ -262,6 +264,8 @@ __wrap_endgrent(void) {
* will_return(__wrap_fopen, errno_to_set);
*
* expect_* functions: https://api.cmocka.org/group__cmocka__param.html
+ *
+ * This has two mocked functions, since fopen() is sometimes actually fopen64().
*/
bool pcmk__mock_fopen = false;
@@ -285,6 +289,26 @@ __wrap_fopen(const char *pathname, const char *mode)
}
}
+#ifdef HAVE_FOPEN64
+FILE *
+__wrap_fopen64(const char *pathname, const char *mode)
+{
+ if (pcmk__mock_fopen) {
+ check_expected_ptr(pathname);
+ check_expected_ptr(mode);
+ errno = mock_type(int);
+
+ if (errno != 0) {
+ return NULL;
+ } else {
+ return __real_fopen64(pathname, mode);
+ }
+
+ } else {
+ return __real_fopen64(pathname, mode);
+ }
+}
+#endif
/* getpwnam_r()
*
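
The mock.c changes above add an fopen64() twin to the existing fopen() wrapper, which checks the expected arguments and fakes a caller-chosen errno. A sketch of how a cmocka test case might drive it — the path is hypothetical, and the usual -Wl,--wrap=fopen link setup for cmocka wrappers is assumed:

/* Sketch of a test case using the wrapper: declare the expected arguments,
 * pick the errno the wrapper should set, then exercise the call.
 */
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

extern bool pcmk__mock_fopen;   // Declared in mock_private.h

static void
fopen_fails_with_eacces(void **state)
{
    FILE *fp = NULL;
    int err = 0;

    (void) state;
    pcmk__mock_fopen = true;

    expect_string(__wrap_fopen, pathname, "/tmp/hypothetical-file");
    expect_string(__wrap_fopen, mode, "r");
    will_return(__wrap_fopen, EACCES);      // errno the wrapper will set

    fp = fopen("/tmp/hypothetical-file", "r");
    err = errno;

    assert_null(fp);
    assert_int_equal(err, EACCES);

    pcmk__mock_fopen = false;
}
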
diff --git a/lib/common/mock_private.h b/lib/common/mock_private.h
index 45207c4..b0e0ed2 100644
--- a/lib/common/mock_private.h
+++ b/lib/common/mock_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -29,6 +29,10 @@ void *__wrap_calloc(size_t nmemb, size_t size);
extern bool pcmk__mock_fopen;
FILE *__real_fopen(const char *pathname, const char *mode);
FILE *__wrap_fopen(const char *pathname, const char *mode);
+#ifdef HAVE_FOPEN64
+FILE *__real_fopen64(const char *pathname, const char *mode);
+FILE *__wrap_fopen64(const char *pathname, const char *mode);
+#endif
extern bool pcmk__mock_getenv;
char *__real_getenv(const char *name);
diff --git a/lib/common/nvpair.c b/lib/common/nvpair.c
index 3766c45..dbb9c99 100644
--- a/lib/common/nvpair.c
+++ b/lib/common/nvpair.c
@@ -334,55 +334,6 @@ crm_xml_add(xmlNode *node, const char *name, const char *value)
}
/*!
- * \brief Replace an XML attribute with specified name and (possibly NULL) value
- *
- * \param[in,out] node XML node to modify
- * \param[in] name Attribute name to set
- * \param[in] value Attribute value to set
- *
- * \return New value on success, \c NULL otherwise
- * \note This does nothing if node or name is \c NULL or empty.
- */
-const char *
-crm_xml_replace(xmlNode *node, const char *name, const char *value)
-{
- bool dirty = FALSE;
- xmlAttr *attr = NULL;
- const char *old_value = NULL;
-
- CRM_CHECK(node != NULL, return NULL);
- CRM_CHECK(name != NULL && name[0] != 0, return NULL);
-
- old_value = crm_element_value(node, name);
-
- /* Could be re-setting the same value */
- CRM_CHECK(old_value != value, return value);
-
- if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) {
- /* Create a fake object linked to doc->_private instead? */
- crm_trace("Cannot replace %s=%s to %s", name, value, node->name);
- return NULL;
-
- } else if (old_value && !value) {
- xml_remove_prop(node, name);
- return NULL;
- }
-
- if (pcmk__tracking_xml_changes(node, FALSE)) {
- if (!old_value || !value || !strcmp(old_value, value)) {
- dirty = TRUE;
- }
- }
-
- attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value);
- if (dirty) {
- pcmk__mark_xml_attr_dirty(attr);
- }
- CRM_CHECK(attr && attr->children && attr->children->content, return NULL);
- return (char *) attr->children->content;
-}
-
-/*!
* \brief Create an XML attribute with specified name and integer value
*
* This is like \c crm_xml_add() but taking an integer value.
@@ -503,7 +454,7 @@ crm_element_value(const xmlNode *data, const char *name)
return NULL;
} else if (name == NULL) {
- crm_err("Couldn't find NULL in %s", crm_element_name(data));
+ crm_err("Couldn't find NULL in %s", data->name);
return NULL;
}
@@ -883,7 +834,7 @@ xml2list(const xmlNode *parent)
nvpair_list = find_xml_node(parent, XML_TAG_ATTRS, FALSE);
if (nvpair_list == NULL) {
- crm_trace("No attributes in %s", crm_element_name(parent));
+ crm_trace("No attributes in %s", parent->name);
crm_log_xml_trace(parent, "No attributes for resource op");
}
@@ -988,5 +939,44 @@ pcmk_format_named_time(const char *name, time_t epoch_time)
return result;
}
+const char *
+crm_xml_replace(xmlNode *node, const char *name, const char *value)
+{
+ bool dirty = FALSE;
+ xmlAttr *attr = NULL;
+ const char *old_value = NULL;
+
+ CRM_CHECK(node != NULL, return NULL);
+ CRM_CHECK(name != NULL && name[0] != 0, return NULL);
+
+ old_value = crm_element_value(node, name);
+
+ /* Could be re-setting the same value */
+ CRM_CHECK(old_value != value, return value);
+
+ if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) {
+ /* Create a fake object linked to doc->_private instead? */
+ crm_trace("Cannot replace %s=%s to %s", name, value, node->name);
+ return NULL;
+
+ } else if (old_value && !value) {
+ xml_remove_prop(node, name);
+ return NULL;
+ }
+
+ if (pcmk__tracking_xml_changes(node, FALSE)) {
+ if (!old_value || !value || !strcmp(old_value, value)) {
+ dirty = TRUE;
+ }
+ }
+
+ attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value);
+ if (dirty) {
+ pcmk__mark_xml_attr_dirty(attr);
+ }
+ CRM_CHECK(attr && attr->children && attr->children->content, return NULL);
+ return (char *) attr->children->content;
+}
+
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/common/options.c b/lib/common/options.c
index cb32b3f..2d86ebc 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -91,15 +91,23 @@ pcmk__env_option(const char *option)
/*!
* \brief Set or unset a Pacemaker environment variable option
*
- * Set an environment variable option with both a PCMK_ and (for
- * backward compatibility) HA_ prefix.
+ * Set an environment variable option with a \c "PCMK_" prefix and optionally
+ * an \c "HA_" prefix for backward compatibility.
*
* \param[in] option Environment variable name (without prefix)
* \param[in] value New value (or NULL to unset)
+ * \param[in] compat If false and \p value is not \c NULL, set only
+ * \c "PCMK_<option>"; otherwise, set (or unset) both
+ * \c "PCMK_<option>" and \c "HA_<option>"
+ *
+ * \note \p compat is ignored when \p value is \c NULL. A \c NULL \p value
+ * means we're unsetting \p option. \c pcmk__get_env_option() checks for
+ * both prefixes, so we want to clear them both.
*/
void
-pcmk__set_env_option(const char *option, const char *value)
+pcmk__set_env_option(const char *option, const char *value, bool compat)
{
+ // @COMPAT Drop support for "HA_" options eventually
const char *const prefixes[] = {"PCMK_", "HA_"};
char env_name[NAME_MAX];
@@ -132,6 +140,11 @@ pcmk__set_env_option(const char *option, const char *value)
crm_err("Failed to %sset %s: %s", (value != NULL)? "" : "un",
env_name, strerror(errno));
}
+
+ if (!compat && (value != NULL)) {
+ // For set, don't proceed to HA_<option> unless compat is enabled
+ break;
+ }
}
}
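
The options.c hunk above adds a compat flag: setting writes only PCMK_<option> unless the legacy HA_ alias is explicitly requested, while unsetting always clears both prefixes. A standalone sketch of that logic (the helper name is illustrative; the prefixes are those from the hunk):

/* Sketch: write or clear PCMK_<option>, and touch the legacy HA_<option>
 * only when unsetting or when compat is requested.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void
set_env_option(const char *option, const char *value, bool compat)
{
    const char *const prefixes[] = { "PCMK_", "HA_" };
    char env_name[NAME_MAX];

    for (int i = 0; i < 2; i++) {
        snprintf(env_name, sizeof(env_name), "%s%s", prefixes[i], option);
        if (value != NULL) {
            setenv(env_name, value, 1);
        } else {
            unsetenv(env_name);
        }
        if (!compat && (value != NULL)) {
            break;  // Plain set: stop after PCMK_<option>
        }
    }
}
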
diff --git a/lib/common/output_html.c b/lib/common/output_html.c
index 47b14c1..92e9010 100644
--- a/lib/common/output_html.c
+++ b/lib/common/output_html.c
@@ -152,7 +152,7 @@ html_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy
* anything else that the user could add, and we want it done last to pick up
* any options that may have been given.
*/
- head_node = xmlNewNode(NULL, (pcmkXmlStr) "head");
+ head_node = xmlNewDocRawNode(NULL, NULL, (pcmkXmlStr) "head", NULL);
if (title != NULL ) {
pcmk_create_xml_text_node(head_node, "title", title);
@@ -458,7 +458,7 @@ pcmk__html_add_header(const char *name, ...) {
va_start(ap, name);
- header_node = xmlNewNode(NULL, (pcmkXmlStr) name);
+ header_node = xmlNewDocRawNode(NULL, NULL, (pcmkXmlStr) name, NULL);
while (1) {
char *key = va_arg(ap, char *);
char *value;
diff --git a/lib/common/output_log.c b/lib/common/output_log.c
index aca168d..54fa37e 100644
--- a/lib/common/output_log.c
+++ b/lib/common/output_log.c
@@ -12,6 +12,7 @@
#include <ctype.h>
#include <stdarg.h>
+#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
@@ -23,8 +24,43 @@ typedef struct private_data_s {
/* gathered in log_begin_list */
GQueue/*<char*>*/ *prefixes;
uint8_t log_level;
+ const char *function;
+ const char *file;
+ uint32_t line;
+ uint32_t tags;
} private_data_t;
+/*!
+ * \internal
+ * \brief Log a message using output object's log level and filters
+ *
+ * \param[in] priv Output object's private_data_t
+ * \param[in] fmt printf(3)-style format string
+ * \param[in] args... Format string arguments
+ */
+#define logger(priv, fmt, args...) do { \
+ qb_log_from_external_source(pcmk__s((priv)->function, __func__), \
+ pcmk__s((priv)->file, __FILE__), fmt, (priv)->log_level, \
+ (((priv)->line == 0)? __LINE__ : (priv)->line), (priv)->tags, \
+ ##args); \
+ } while (0);
+
+/*!
+ * \internal
+ * \brief Log a message using an explicit log level and output object's filters
+ *
+ * \param[in] priv Output object's private_data_t
+ * \param[in] level Log level
+ * \param[in] fmt printf(3)-style format string
+ * \param[in] ap Variadic arguments
+ */
+#define logger_va(priv, level, fmt, ap) do { \
+ qb_log_from_external_source_va(pcmk__s((priv)->function, __func__), \
+ pcmk__s((priv)->file, __FILE__), fmt, level, \
+ (((priv)->line == 0)? __LINE__ : (priv)->line), (priv)->tags, \
+ ap); \
+ } while (0);
+
static void
log_subprocess_output(pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr) {
@@ -94,35 +130,31 @@ log_version(pcmk__output_t *out, bool extended) {
priv = out->priv;
if (extended) {
- do_crm_log(priv->log_level, "Pacemaker %s (Build: %s): %s",
- PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
+ logger(priv, "Pacemaker %s (Build: %s): %s",
+ PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
} else {
- do_crm_log(priv->log_level, "Pacemaker %s", PACEMAKER_VERSION);
- do_crm_log(priv->log_level, "Written by Andrew Beekhof and"
- "the Pacemaker project contributors");
+ logger(priv, "Pacemaker " PACEMAKER_VERSION);
+ logger(priv, "Written by Andrew Beekhof and "
+ "the Pacemaker project contributors");
}
}
G_GNUC_PRINTF(2, 3)
static void
-log_err(pcmk__output_t *out, const char *format, ...) {
+log_err(pcmk__output_t *out, const char *format, ...)
+{
va_list ap;
- char* buffer = NULL;
- int len = 0;
+ private_data_t *priv = NULL;
- CRM_ASSERT(out != NULL);
+ CRM_ASSERT((out != NULL) && (out->priv != NULL));
+ priv = out->priv;
- va_start(ap, format);
- /* Informational output does not get indented, to separate it from other
+ /* Error output does not get indented, to separate it from other
* potentially indented list output.
*/
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ va_start(ap, format);
+ logger_va(priv, LOG_ERR, format, ap);
va_end(ap);
-
- crm_err("%s", buffer);
-
- free(buffer);
}
static void
@@ -195,15 +227,15 @@ log_list_item(pcmk__output_t *out, const char *name, const char *format, ...) {
if (strcmp(buffer, "") != 0) { /* We don't want empty messages */
if ((name != NULL) && (strcmp(name, "") != 0)) {
if (strcmp(prefix, "") != 0) {
- do_crm_log(priv->log_level, "%s: %s: %s", prefix, name, buffer);
+ logger(priv, "%s: %s: %s", prefix, name, buffer);
} else {
- do_crm_log(priv->log_level, "%s: %s", name, buffer);
+ logger(priv, "%s: %s", name, buffer);
}
} else {
if (strcmp(prefix, "") != 0) {
- do_crm_log(priv->log_level, "%s: %s", prefix, buffer);
+ logger(priv, "%s: %s", prefix, buffer);
} else {
- do_crm_log(priv->log_level, "%s", buffer);
+ logger(priv, "%s", buffer);
}
}
}
@@ -228,23 +260,21 @@ log_end_list(pcmk__output_t *out) {
G_GNUC_PRINTF(2, 3)
static int
-log_info(pcmk__output_t *out, const char *format, ...) {
- private_data_t *priv = NULL;
- int len = 0;
+log_info(pcmk__output_t *out, const char *format, ...)
+{
va_list ap;
- char* buffer = NULL;
+ private_data_t *priv = NULL;
CRM_ASSERT(out != NULL && out->priv != NULL);
priv = out->priv;
+ /* Informational output does not get indented, to separate it from other
+ * potentially indented list output.
+ */
va_start(ap, format);
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ logger_va(priv, priv->log_level, format, ap);
va_end(ap);
- do_crm_log(priv->log_level, "%s", buffer);
-
- free(buffer);
return pcmk_rc_ok;
}
@@ -252,22 +282,16 @@ G_GNUC_PRINTF(2, 3)
static int
log_transient(pcmk__output_t *out, const char *format, ...)
{
- private_data_t *priv = NULL;
- int len = 0;
va_list ap;
- char *buffer = NULL;
+ private_data_t *priv = NULL;
CRM_ASSERT(out != NULL && out->priv != NULL);
priv = out->priv;
va_start(ap, format);
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ logger_va(priv, QB_MAX(priv->log_level, LOG_DEBUG), format, ap);
va_end(ap);
- do_crm_log(QB_MAX(priv->log_level, LOG_DEBUG), "%s", buffer);
-
- free(buffer);
return pcmk_rc_ok;
}
@@ -351,3 +375,33 @@ pcmk__output_set_log_level(pcmk__output_t *out, uint8_t log_level) {
priv = out->priv;
priv->log_level = log_level;
}
+
+/*!
+ * \internal
+ * \brief Set the file, function, line, and tags used to filter log output
+ *
+ * \param[in,out] out Logger output object
+ * \param[in] file File name to filter with (or NULL for default)
+ * \param[in] function Function name to filter with (or NULL for default)
+ * \param[in] line Line number to filter with (or 0 for default)
+ * \param[in] tags Tags to filter with (or 0 for none)
+ *
+ * \note Custom filters should generally be used only in short areas of a single
+ * function. When done, callers should call this function again with
+ * NULL/0 arguments to reset the filters.
+ */
+void
+pcmk__output_set_log_filter(pcmk__output_t *out, const char *file,
+ const char *function, uint32_t line, uint32_t tags)
+{
+ private_data_t *priv = NULL;
+
+ CRM_ASSERT((out != NULL) && (out->priv != NULL));
+ CRM_CHECK(pcmk__str_eq(out->fmt_name, "log", pcmk__str_none), return);
+
+ priv = out->priv;
+ priv->file = file;
+ priv->function = function;
+ priv->line = line;
+ priv->tags = tags;
+}
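
The output_log.c changes above capture the caller's file, function, line, and tags and forward them through qb_log_from_external_source(), so log filtering matches the original call site rather than the logging backend. A generic, libqb-free sketch of that "record the call site in a macro" idiom:

/* Sketch: the macro notes where the log call appears; the function only
 * formats and prints. Uses stdio to stay self-contained; ##__VA_ARGS__ is
 * the same GNU extension the diff's logger() macro relies on.
 */
#include <stdarg.h>
#include <stdio.h>

static void
log_at(const char *file, const char *function, unsigned int line,
       const char *fmt, ...)
{
    va_list ap;

    fprintf(stderr, "%s:%u %s(): ", file, line, function);
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fputc('\n', stderr);
}

#define log_here(fmt, ...) \
    log_at(__FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__)
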
diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c
index 0972638..ba61145 100644
--- a/lib/common/output_xml.c
+++ b/lib/common/output_xml.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2022 the Pacemaker project contributors
+ * Copyright 2019-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -13,6 +13,10 @@
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
+#include <crm/crm.h>
+#include <crm/common/output.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h> /* pcmk__xml2fd */
#include <glib.h>
#include <crm/common/cmdline_internal.h>
@@ -43,8 +47,8 @@ typedef struct subst_s {
static subst_t substitutions[] = {
{ "Active Resources", "resources" },
- { "Allocation Scores", "allocations" },
- { "Allocation Scores and Utilization Information", "allocations_utilizations" },
+ { "Assignment Scores", "allocations" },
+ { "Assignment Scores and Utilization Information", "allocations_utilizations" },
{ "Cluster Summary", "summary" },
{ "Current cluster status", "cluster_status" },
{ "Executing Cluster Transition", "transition" },
@@ -190,10 +194,7 @@ xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_
}
if (print) {
- char *buf = dump_xml_formatted_with_text(priv->root);
- fprintf(out->dest, "%s", buf);
- fflush(out->dest);
- free(buf);
+ pcmk__xml2fd(fileno(out->dest), priv->root);
}
if (copy_dest != NULL) {
@@ -286,7 +287,10 @@ xml_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
CRM_ASSERT(out != NULL);
parent = pcmk__output_create_xml_node(out, name, NULL);
- cdata_node = xmlNewCDataBlock(getDocPtr(parent), (pcmkXmlStr) buf, strlen(buf));
+ if (parent == NULL) {
+ return;
+ }
+ cdata_node = xmlNewCDataBlock(parent->doc, (pcmkXmlStr) buf, strlen(buf));
xmlAddChild(parent, cdata_node);
}
diff --git a/lib/common/patchset.c b/lib/common/patchset.c
index 8c1362d..34e27fb 100644
--- a/lib/common/patchset.c
+++ b/lib/common/patchset.c
@@ -41,6 +41,14 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset)
xml_node_private_t *nodepriv = xml->_private;
const char *value = NULL;
+ if (nodepriv == NULL) {
+ /* Elements that shouldn't occur in a CIB don't have _private set. They
+ * should be stripped out, ignored, or have an error thrown by any code
+ * that processes their parent, so we ignore any changes to them.
+ */
+ return;
+ }
+
// If this XML node is new, just report that
if (patchset && pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
GString *xpath = pcmk__element_xpath(xml->parent);
@@ -93,7 +101,7 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset)
} else {
crm_xml_add(attr, XML_DIFF_OP, "set");
- value = crm_element_value(xml, (const char *) pIter->name);
+ value = pcmk__xml_attr_value(pIter);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, value);
}
}
@@ -189,7 +197,7 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
return;
}
- tag = "diff-removed";
+ tag = XML_TAG_DIFF_REMOVED;
diff_child = find_xml_node(local_diff, tag, FALSE);
if (diff_child == NULL) {
diff_child = create_xml_node(local_diff, tag);
@@ -210,7 +218,7 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
}
}
- tag = "diff-added";
+ tag = XML_TAG_DIFF_ADDED;
diff_child = find_xml_node(local_diff, tag, FALSE);
if (diff_child == NULL) {
diff_child = create_xml_node(local_diff, tag);
@@ -229,7 +237,8 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
}
for (xmlAttrPtr a = pcmk__xe_first_attr(next); a != NULL; a = a->next) {
- const char *p_value = crm_element_value(next, (const char *) a->name);
+
+ const char *p_value = pcmk__xml_attr_value(a);
xmlSetProp(cib, a->name, (pcmkXmlStr) p_value);
}
@@ -246,7 +255,7 @@ xml_create_patchset_v1(xmlNode *source, xmlNode *target, bool config,
if (patchset) {
CRM_LOG_ASSERT(xml_document_dirty(target));
xml_repair_v1_diff(source, target, patchset, config);
- crm_xml_add(patchset, "format", "1");
+ crm_xml_add(patchset, PCMK_XA_FORMAT, "1");
}
return patchset;
}
@@ -276,7 +285,7 @@ xml_create_patchset_v2(xmlNode *source, xmlNode *target)
docpriv = target->doc->_private;
patchset = create_xml_node(NULL, XML_TAG_DIFF);
- crm_xml_add_int(patchset, "format", 2);
+ crm_xml_add_int(patchset, PCMK_XA_FORMAT, 2);
version = create_xml_node(patchset, XML_DIFF_VERSION);
@@ -389,7 +398,7 @@ patchset_process_digest(xmlNode *patch, xmlNode *source, xmlNode *target,
*/
CRM_LOG_ASSERT(!xml_document_dirty(target));
- crm_element_value_int(patch, "format", &format);
+ crm_element_value_int(patch, PCMK_XA_FORMAT, &format);
if ((format > 1) && !with_digest) {
return;
}
@@ -418,7 +427,6 @@ process_v1_removals(xmlNode *target, xmlNode *patch)
xmlNode *cIter = NULL;
char *id = NULL;
- const char *name = NULL;
const char *value = NULL;
if ((target == NULL) || (patch == NULL)) {
@@ -431,18 +439,15 @@ process_v1_removals(xmlNode *target, xmlNode *patch)
subtract_xml_comment(target->parent, target, patch, &dummy);
}
- name = crm_element_name(target);
- CRM_CHECK(name != NULL, return);
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(patch),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, (const char *) patch->name), return);
CRM_CHECK(pcmk__str_eq(ID(target), ID(patch), pcmk__str_casei), return);
// Check for XML_DIFF_MARKER in a child
id = crm_element_value_copy(target, XML_ATTR_ID);
value = crm_element_value(patch, XML_DIFF_MARKER);
if ((value != NULL) && (strcmp(value, "removed:top") == 0)) {
- crm_trace("We are the root of the deletion: %s.id=%s", name, id);
+ crm_trace("We are the root of the deletion: %s.id=%s",
+ target->name, id);
free_xml(target);
free(id);
return;
@@ -482,18 +487,17 @@ process_v1_additions(xmlNode *parent, xmlNode *target, xmlNode *patch)
}
// Check for XML_DIFF_MARKER in a child
+ name = (const char *) patch->name;
value = crm_element_value(patch, XML_DIFF_MARKER);
if ((target == NULL) && (value != NULL)
&& (strcmp(value, "added:top") == 0)) {
id = ID(patch);
- name = crm_element_name(patch);
crm_trace("We are the root of the addition: %s.id=%s", name, id);
add_node_copy(parent, patch);
return;
} else if (target == NULL) {
id = ID(patch);
- name = crm_element_name(patch);
crm_err("Could not locate: %s.id=%s", name, id);
return;
}
@@ -502,17 +506,13 @@ process_v1_additions(xmlNode *parent, xmlNode *target, xmlNode *patch)
pcmk__xc_update(parent, target, patch);
}
- name = crm_element_name(target);
- CRM_CHECK(name != NULL, return);
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(patch),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, name), return);
CRM_CHECK(pcmk__str_eq(ID(target), ID(patch), pcmk__str_casei), return);
for (xIter = pcmk__xe_first_attr(patch); xIter != NULL;
xIter = xIter->next) {
const char *p_name = (const char *) xIter->name;
- const char *p_value = crm_element_value(patch, p_name);
+ const char *p_value = pcmk__xml_attr_value(xIter);
xml_remove_prop(target, p_name); // Preserve patch order
crm_xml_add(target, p_name, p_value);
@@ -547,7 +547,7 @@ find_patch_xml_node(const xmlNode *patchset, int format, bool added,
switch (format) {
case 1:
- label = added? "diff-added" : "diff-removed";
+ label = added? XML_TAG_DIFF_ADDED : XML_TAG_DIFF_REMOVED;
*patch_node = find_xml_node(patchset, label, FALSE);
cib_node = find_xml_node(*patch_node, "cib", FALSE);
if (cib_node != NULL) {
@@ -582,7 +582,7 @@ xml_patch_versions(const xmlNode *patchset, int add[3], int del[3])
};
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
/* Process removals */
if (!find_patch_xml_node(patchset, format, FALSE, &tmp)) {
@@ -614,12 +614,11 @@ xml_patch_versions(const xmlNode *patchset, int add[3], int del[3])
*
* \param[in] xml Root of current CIB
* \param[in] patchset Patchset to check
- * \param[in] format Patchset version
*
* \return Standard Pacemaker return code
*/
static int
-xml_patch_version_check(const xmlNode *xml, const xmlNode *patchset, int format)
+xml_patch_version_check(const xmlNode *xml, const xmlNode *patchset)
{
int lpc = 0;
bool changed = FALSE;
@@ -701,8 +700,8 @@ apply_v1_patchset(xmlNode *xml, const xmlNode *patchset)
int root_nodes_seen = 0;
xmlNode *child_diff = NULL;
- xmlNode *added = find_xml_node(patchset, "diff-added", FALSE);
- xmlNode *removed = find_xml_node(patchset, "diff-removed", FALSE);
+ xmlNode *added = find_xml_node(patchset, XML_TAG_DIFF_ADDED, FALSE);
+ xmlNode *removed = find_xml_node(patchset, XML_TAG_DIFF_REMOVED, FALSE);
xmlNode *old = copy_xml(xml);
crm_trace("Subtraction Phase");
@@ -981,7 +980,7 @@ apply_v2_patchset(xmlNode *xml, const xmlNode *patchset)
for (xmlAttrPtr pIter = pcmk__xe_first_attr(attrs); pIter != NULL;
pIter = pIter->next) {
const char *name = (const char *) pIter->name;
- const char *value = crm_element_value(attrs, name);
+ const char *value = pcmk__xml_attr_value(pIter);
crm_xml_add(match, name, value);
}
@@ -1022,6 +1021,10 @@ apply_v2_patchset(xmlNode *xml, const xmlNode *patchset)
}
child = xmlDocCopyNode(change->children, match->doc, 1);
+ if (child == NULL) {
+ return ENOMEM;
+ }
+
if (match_child) {
crm_trace("Adding %s at position %d", child->name, position);
xmlAddPrevSibling(match_child, child);
@@ -1098,43 +1101,31 @@ xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version)
int format = 1;
int rc = pcmk_ok;
xmlNode *old = NULL;
- const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST);
+ const char *digest = NULL;
if (patchset == NULL) {
return rc;
}
- pcmk__if_tracing(
- {
- pcmk__output_t *logger_out = NULL;
-
- rc = pcmk_rc2legacy(pcmk__log_output_new(&logger_out));
- CRM_CHECK(rc == pcmk_ok, return rc);
+ pcmk__log_xml_patchset(LOG_TRACE, patchset);
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
- rc = logger_out->message(logger_out, "xml-patchset", patchset);
- logger_out->finish(logger_out, pcmk_rc2exitc(rc), true,
- NULL);
- pcmk__output_free(logger_out);
- rc = pcmk_ok;
- },
- {}
- );
-
- crm_element_value_int(patchset, "format", &format);
if (check_version) {
- rc = pcmk_rc2legacy(xml_patch_version_check(xml, patchset, format));
+ rc = pcmk_rc2legacy(xml_patch_version_check(xml, patchset));
if (rc != pcmk_ok) {
return rc;
}
}
- if (digest) {
- // Make it available for logging if result doesn't have expected digest
- old = copy_xml(xml);
+ digest = crm_element_value(patchset, XML_ATTR_DIGEST);
+ if (digest != NULL) {
+ /* Make original XML available for logging in case result doesn't have
+ * expected digest
+ */
+ pcmk__if_tracing(old = copy_xml(xml), {});
}
if (rc == pcmk_ok) {
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
rc = pcmk_rc2legacy(apply_v1_patchset(xml, patchset));
@@ -1195,9 +1186,9 @@ xmlNode *
diff_xml_object(xmlNode *old, xmlNode *new, gboolean suppress)
{
xmlNode *tmp1 = NULL;
- xmlNode *diff = create_xml_node(NULL, "diff");
- xmlNode *removed = create_xml_node(diff, "diff-removed");
- xmlNode *added = create_xml_node(diff, "diff-added");
+ xmlNode *diff = create_xml_node(NULL, XML_TAG_DIFF);
+ xmlNode *removed = create_xml_node(diff, XML_TAG_DIFF_REMOVED);
+ xmlNode *added = create_xml_node(diff, XML_TAG_DIFF_ADDED);
crm_xml_add(diff, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
@@ -1268,11 +1259,12 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
}
id = ID(left);
+ name = (const char *) left->name;
if (right == NULL) {
xmlNode *deleted = NULL;
crm_trace("Processing <%s " XML_ATTR_ID "=%s> (complete copy)",
- crm_element_name(left), id);
+ name, id);
deleted = add_node_copy(parent, left);
crm_xml_add(deleted, XML_DIFF_MARKER, marker);
@@ -1280,11 +1272,8 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
return deleted;
}
- name = crm_element_name(left);
CRM_CHECK(name != NULL, return NULL);
- CRM_CHECK(pcmk__str_eq(crm_element_name(left), crm_element_name(right),
- pcmk__str_casei),
- return NULL);
+ CRM_CHECK(pcmk__xe_is(left, (const char *) right->name), return NULL);
// Check for XML_DIFF_MARKER in a child
value = crm_element_value(right, XML_DIFF_MARKER);
@@ -1367,7 +1356,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
break;
} else {
- const char *left_value = crm_element_value(left, prop_name);
+ const char *left_value = pcmk__xml_attr_value(xIter);
xmlSetProp(diff, (pcmkXmlStr) prop_name, (pcmkXmlStr) value);
crm_xml_add(diff, prop_name, left_value);
@@ -1375,7 +1364,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
} else {
/* Only now do we need the left value */
- const char *left_value = crm_element_value(left, prop_name);
+ const char *left_value = pcmk__xml_attr_value(xIter);
if (strcmp(left_value, right_val) == 0) {
/* unchanged */
@@ -1386,8 +1375,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
xmlAttrPtr pIter = NULL;
crm_trace("Changes detected to %s in "
- "<%s " XML_ATTR_ID "=%s>",
- prop_name, crm_element_name(left), id);
+ "<%s " XML_ATTR_ID "=%s>", prop_name, name, id);
for (pIter = pcmk__xe_first_attr(left); pIter != NULL;
pIter = pIter->next) {
const char *p_name = (const char *) pIter->name;
@@ -1401,8 +1389,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
} else {
crm_trace("Changes detected to %s (%s -> %s) in "
"<%s " XML_ATTR_ID "=%s>",
- prop_name, left_value, right_val,
- crm_element_name(left), id);
+ prop_name, left_value, right_val, name, id);
crm_xml_add(diff, prop_name, left_value);
}
}
@@ -1434,8 +1421,8 @@ apply_xml_diff(xmlNode *old_xml, xmlNode *diff, xmlNode **new_xml)
const char *version = crm_element_value(diff, XML_ATTR_CRM_VERSION);
xmlNode *child_diff = NULL;
- xmlNode *added = find_xml_node(diff, "diff-added", FALSE);
- xmlNode *removed = find_xml_node(diff, "diff-removed", FALSE);
+ xmlNode *added = find_xml_node(diff, XML_TAG_DIFF_ADDED, FALSE);
+ xmlNode *removed = find_xml_node(diff, XML_TAG_DIFF_REMOVED, FALSE);
CRM_CHECK(new_xml != NULL, return FALSE);
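For orientation, the public entry point these hunks touch can be used roughly as follows; the wrapper name is illustrative, while xml_apply_patchset(), pcmk_strerror(), and pcmk__log_xml_patchset() are the calls visible above:

    static int
    apply_cib_patchset(xmlNode *cib, xmlNode *patchset)
    {
        // check_version=true rejects patchsets whose source version does not
        // match the current CIB (see xml_patch_version_check() above)
        int rc = xml_apply_patchset(cib, patchset, true);

        if (rc != pcmk_ok) {
            crm_warn("Could not apply patchset: %s " CRM_XS " rc=%d",
                     pcmk_strerror(rc), rc);
            pcmk__log_xml_patchset(LOG_WARNING, patchset);
        }
        return rc;
    }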
diff --git a/lib/common/patchset_display.c b/lib/common/patchset_display.c
index 731d437..5cc0b52 100644
--- a/lib/common/patchset_display.c
+++ b/lib/common/patchset_display.c
@@ -47,7 +47,7 @@ xml_show_patchset_header(pcmk__output_t *out, const xmlNode *patchset)
xml_patch_versions(patchset, add, del);
if ((add[0] != del[0]) || (add[1] != del[1]) || (add[2] != del[2])) {
- const char *fmt = crm_element_value(patchset, "format");
+ const char *fmt = crm_element_value(patchset, PCMK_XA_FORMAT);
const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST);
out->info(out, "Diff: --- %d.%d.%d %s", del[0], del[1], del[2], fmt);
@@ -80,7 +80,7 @@ static int
xml_show_patchset_v1_recursive(pcmk__output_t *out, const char *prefix,
const xmlNode *data, int depth, uint32_t options)
{
- if (!xml_has_children(data)
+ if ((data->children == NULL)
|| (crm_element_value(data, XML_DIFF_MARKER) != NULL)) {
// Found a change; clear the pcmk__xml_fmt_diff_short option if set
@@ -143,7 +143,7 @@ xml_show_patchset_v1(pcmk__output_t *out, const xmlNode *patchset,
* However, v1 patchsets can only exist during rolling upgrades from
* Pacemaker 1.1.11, so not worth worrying about.
*/
- removed = find_xml_node(patchset, "diff-removed", FALSE);
+ removed = find_xml_node(patchset, XML_TAG_DIFF_REMOVED, FALSE);
for (child = pcmk__xml_first_child(removed); child != NULL;
child = pcmk__xml_next(child)) {
int temp_rc = xml_show_patchset_v1_recursive(out, "- ", child, 0,
@@ -159,7 +159,7 @@ xml_show_patchset_v1(pcmk__output_t *out, const xmlNode *patchset,
}
is_first = true;
- added = find_xml_node(patchset, "diff-added", FALSE);
+ added = find_xml_node(patchset, XML_TAG_DIFF_ADDED, FALSE);
for (child = pcmk__xml_first_child(added); child != NULL;
child = pcmk__xml_next(child)) {
int temp_rc = xml_show_patchset_v1_recursive(out, "+ ", child, 0,
@@ -303,11 +303,11 @@ xml_show_patchset_v2(pcmk__output_t *out, const xmlNode *patchset)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_default(pcmk__output_t *out, va_list args)
{
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
int format = 1;
@@ -316,7 +316,7 @@ xml_patchset_default(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
return xml_show_patchset_v1(out, patchset, pcmk__xml_fmt_pretty);
@@ -342,13 +342,13 @@ xml_patchset_default(pcmk__output_t *out, va_list args)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_log(pcmk__output_t *out, va_list args)
{
static struct qb_log_callsite *patchset_cs = NULL;
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
uint8_t log_level = pcmk__output_get_log_level(out);
int format = 1;
@@ -373,7 +373,7 @@ xml_patchset_log(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
if (log_level < LOG_DEBUG) {
@@ -404,11 +404,11 @@ xml_patchset_log(pcmk__output_t *out, va_list args)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_xml(pcmk__output_t *out, va_list args)
{
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
if (patchset != NULL) {
char *buf = dump_xml_formatted_with_text(patchset);
@@ -490,7 +490,7 @@ xml_log_patchset(uint8_t log_level, const char *function,
goto done;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
if (log_level < LOG_DEBUG) {
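The header formatter above reads the version triple carried by a patchset via xml_patch_versions(). A small sketch of the same call from the caller's side, assuming patchset is a valid diff element; the function name is illustrative:

    static void
    log_patchset_versions(const xmlNode *patchset)
    {
        int add[3] = { 0, 0, 0 };
        int del[3] = { 0, 0, 0 };

        xml_patch_versions(patchset, add, del);
        crm_info("Patchset moves the CIB from %d.%d.%d to %d.%d.%d",
                 del[0], del[1], del[2], add[0], add[1], add[2]);
    }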
diff --git a/lib/common/remote.c b/lib/common/remote.c
index 8c5969a..fe19296 100644
--- a/lib/common/remote.c
+++ b/lib/common/remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2022 the Pacemaker project contributors
+ * Copyright 2008-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -167,7 +167,8 @@ set_minimum_dh_bits(const gnutls_session_t *session)
{
int dh_min_bits;
- pcmk__scan_min_int(getenv("PCMK_dh_min_bits"), &dh_min_bits, 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MIN_BITS), &dh_min_bits,
+ 0);
/* This function is deprecated since GnuTLS 3.1.7, in favor of letting
* the priority string imply the DH requirements, but this is the only
@@ -186,8 +187,11 @@ get_bound_dh_bits(unsigned int dh_bits)
int dh_min_bits;
int dh_max_bits;
- pcmk__scan_min_int(getenv("PCMK_dh_min_bits"), &dh_min_bits, 0);
- pcmk__scan_min_int(getenv("PCMK_dh_max_bits"), &dh_max_bits, 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MIN_BITS), &dh_min_bits,
+ 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MAX_BITS), &dh_max_bits,
+ 0);
+
if ((dh_max_bits > 0) && (dh_max_bits < dh_min_bits)) {
crm_warn("Ignoring PCMK_dh_max_bits less than PCMK_dh_min_bits");
dh_max_bits = 0;
@@ -228,7 +232,7 @@ pcmk__new_tls_session(int csock, unsigned int conn_type,
* http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication
*/
- prio_base = getenv("PCMK_tls_priorities");
+ prio_base = pcmk__env_option(PCMK__ENV_TLS_PRIORITIES);
if (prio_base == NULL) {
prio_base = PCMK_GNUTLS_PRIORITIES;
}
@@ -485,7 +489,7 @@ remote_send_iovs(pcmk__remote_t *remote, struct iovec *iov, int iovs)
* \return Standard Pacemaker return code
*/
int
-pcmk__remote_send_xml(pcmk__remote_t *remote, xmlNode *msg)
+pcmk__remote_send_xml(pcmk__remote_t *remote, const xmlNode *msg)
{
int rc = pcmk_rc_ok;
static uint64_t id = 0;
@@ -558,16 +562,17 @@ pcmk__remote_message_xml(pcmk__remote_t *remote)
rc = BZ2_bzBuffToBuffDecompress(uncompressed + header->payload_offset, &size_u,
remote->buffer + header->payload_offset,
header->payload_compressed, 1, 0);
+ rc = pcmk__bzlib2rc(rc);
- if (rc != BZ_OK && header->version > REMOTE_MSG_VERSION) {
+ if (rc != pcmk_rc_ok && header->version > REMOTE_MSG_VERSION) {
crm_warn("Couldn't decompress v%d message, we only understand v%d",
header->version, REMOTE_MSG_VERSION);
free(uncompressed);
return NULL;
- } else if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ } else if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
return NULL;
}
@@ -1079,13 +1084,16 @@ pcmk__connect_remote(const char *host, int port, int timeout, int *timer_id,
hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_CANONNAME;
+
rc = getaddrinfo(server, NULL, &hints, &res);
- if (rc != 0) {
+ rc = pcmk__gaierror2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_err("Unable to get IP address info for %s: %s",
- server, gai_strerror(rc));
- rc = ENOTCONN;
+ server, pcmk_rc_str(rc));
goto async_cleanup;
}
+
if (!res || !res->ai_addr) {
crm_err("Unable to get IP address info for %s: no result", server);
rc = ENOTCONN;
@@ -1252,13 +1260,14 @@ crm_default_remote_port(void)
static int port = 0;
if (port == 0) {
- const char *env = getenv("PCMK_remote_port");
+ const char *env = pcmk__env_option(PCMK__ENV_REMOTE_PORT);
if (env) {
errno = 0;
port = strtol(env, NULL, 10);
if (errno || (port < 1) || (port > 65535)) {
- crm_warn("Environment variable PCMK_remote_port has invalid value '%s', using %d instead",
+ crm_warn("Environment variable PCMK_" PCMK__ENV_REMOTE_PORT
+ " has invalid value '%s', using %d instead",
env, DEFAULT_REMOTE_PORT);
port = DEFAULT_REMOTE_PORT;
}
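The recurring change in this file is reading tunables through pcmk__env_option() with PCMK__ENV_* constants instead of raw getenv(). A sketch of the same pattern crm_default_remote_port() now uses, with an illustrative wrapper name:

    static int
    example_remote_port(void)
    {
        int port = DEFAULT_REMOTE_PORT;
        const char *env = pcmk__env_option(PCMK__ENV_REMOTE_PORT);

        if (env != NULL) {
            long parsed = strtol(env, NULL, 10);   // range-check before use

            if ((parsed >= 1) && (parsed <= 65535)) {
                port = (int) parsed;
            }
        }
        return port;
    }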
diff --git a/lib/common/results.c b/lib/common/results.c
index 93d79eb..dde8b27 100644
--- a/lib/common/results.c
+++ b/lib/common/results.c
@@ -15,6 +15,7 @@
#include <bzlib.h>
#include <errno.h>
+#include <netdb.h>
#include <stdlib.h>
#include <string.h>
#include <qb/qbdefs.h>
@@ -305,6 +306,18 @@ static const struct pcmk__rc_info {
"Bad XML patch format",
-pcmk_err_generic,
},
+ { "pcmk_rc_no_transaction",
+ "No active transaction found",
+ -pcmk_err_generic,
+ },
+ { "pcmk_rc_ns_resolution",
+ "Nameserver resolution error",
+ -pcmk_err_generic,
+ },
+ { "pcmk_rc_compression",
+ "Compression/decompression error",
+ -pcmk_err_generic,
+ },
};
/*!
@@ -716,6 +729,7 @@ pcmk_rc2exitc(int rc)
case ENOSYS:
case EOVERFLOW:
case pcmk_rc_underflow:
+ case pcmk_rc_compression:
return CRM_EX_SOFTWARE;
case EBADMSG:
@@ -759,10 +773,12 @@ pcmk_rc2exitc(int rc)
case ENODEV:
case ENOENT:
case ENXIO:
+ case pcmk_rc_no_transaction:
case pcmk_rc_unknown_format:
return CRM_EX_NOSUCH;
case pcmk_rc_node_unknown:
+ case pcmk_rc_ns_resolution:
return CRM_EX_NOHOST;
case ETIME:
@@ -837,37 +853,83 @@ pcmk_rc2ocf(int rc)
// Other functions
-const char *
-bz2_strerror(int rc)
+/*!
+ * \brief Map a getaddrinfo() return code to the most similar Pacemaker
+ * return code
+ *
+ * \param[in] gai getaddrinfo() return code
+ *
+ * \return Most similar Pacemaker return code
+ */
+int
+pcmk__gaierror2rc(int gai)
{
- // See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17
- switch (rc) {
+ switch (gai) {
+ case 0:
+ return pcmk_rc_ok;
+
+ case EAI_AGAIN:
+ return EAGAIN;
+
+ case EAI_BADFLAGS:
+ case EAI_SERVICE:
+ return EINVAL;
+
+ case EAI_FAMILY:
+ return EAFNOSUPPORT;
+
+ case EAI_MEMORY:
+ return ENOMEM;
+
+ case EAI_NONAME:
+ return pcmk_rc_node_unknown;
+
+ case EAI_SOCKTYPE:
+ return ESOCKTNOSUPPORT;
+
+ case EAI_SYSTEM:
+ return errno;
+
+ default:
+ return pcmk_rc_ns_resolution;
+ }
+}
+
+/*!
+ * \brief Map a bz2 return code to the most similar Pacemaker return code
+ *
+ * \param[in] bz2 bz2 return code
+ *
+ * \return Most similar Pacemaker return code
+ */
+int
+pcmk__bzlib2rc(int bz2)
+{
+ switch (bz2) {
case BZ_OK:
case BZ_RUN_OK:
case BZ_FLUSH_OK:
case BZ_FINISH_OK:
case BZ_STREAM_END:
- return "Ok";
- case BZ_CONFIG_ERROR:
- return "libbz2 has been improperly compiled on your platform";
- case BZ_SEQUENCE_ERROR:
- return "library functions called in the wrong order";
- case BZ_PARAM_ERROR:
- return "parameter is out of range or otherwise incorrect";
+ return pcmk_rc_ok;
+
case BZ_MEM_ERROR:
- return "memory allocation failed";
+ return ENOMEM;
+
case BZ_DATA_ERROR:
- return "data integrity error is detected during decompression";
case BZ_DATA_ERROR_MAGIC:
- return "the compressed stream does not start with the correct magic bytes";
- case BZ_IO_ERROR:
- return "error reading or writing in the compressed file";
case BZ_UNEXPECTED_EOF:
- return "compressed file finishes before the logical end of stream is detected";
+ return pcmk_rc_bad_input;
+
+ case BZ_IO_ERROR:
+ return EIO;
+
case BZ_OUTBUFF_FULL:
- return "output data will not fit into the buffer provided";
+ return EFBIG;
+
+ default:
+ return pcmk_rc_compression;
}
- return "Data compression error";
}
crm_exit_t
@@ -1039,6 +1101,39 @@ pcmk__copy_result(const pcmk__action_result_t *src, pcmk__action_result_t *dst)
#include <crm/common/results_compat.h>
+const char *
+bz2_strerror(int rc)
+{
+ // See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17
+ switch (rc) {
+ case BZ_OK:
+ case BZ_RUN_OK:
+ case BZ_FLUSH_OK:
+ case BZ_FINISH_OK:
+ case BZ_STREAM_END:
+ return "Ok";
+ case BZ_CONFIG_ERROR:
+ return "libbz2 has been improperly compiled on your platform";
+ case BZ_SEQUENCE_ERROR:
+ return "library functions called in the wrong order";
+ case BZ_PARAM_ERROR:
+ return "parameter is out of range or otherwise incorrect";
+ case BZ_MEM_ERROR:
+ return "memory allocation failed";
+ case BZ_DATA_ERROR:
+ return "data integrity error is detected during decompression";
+ case BZ_DATA_ERROR_MAGIC:
+ return "the compressed stream does not start with the correct magic bytes";
+ case BZ_IO_ERROR:
+ return "error reading or writing in the compressed file";
+ case BZ_UNEXPECTED_EOF:
+ return "compressed file finishes before the logical end of stream is detected";
+ case BZ_OUTBUFF_FULL:
+ return "output data will not fit into the buffer provided";
+ }
+ return "Data compression error";
+}
+
crm_exit_t
crm_errno2exit(int rc)
{
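The two mappers above let callers deal only in standard Pacemaker return codes. A sketch of the getaddrinfo() case, mirroring the remote.c hunk earlier in this diff and assuming hints/res are prepared as in pcmk__connect_remote(); the wrapper name is illustrative:

    static int
    resolve_server(const char *server, struct addrinfo *hints,
                   struct addrinfo **res)
    {
        // Map the getaddrinfo() result once so callers can use pcmk_rc_str()
        // and pcmk_rc2exitc() uniformly
        int rc = pcmk__gaierror2rc(getaddrinfo(server, NULL, hints, res));

        if (rc != pcmk_rc_ok) {
            crm_err("Unable to resolve %s: %s " CRM_XS " rc=%d",
                    server, pcmk_rc_str(rc), rc);
        }
        return rc;   // EAI_NONAME becomes pcmk_rc_node_unknown -> CRM_EX_NOHOST
    }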
diff --git a/lib/common/scheduler.c b/lib/common/scheduler.c
new file mode 100644
index 0000000..20e6fdf
--- /dev/null
+++ b/lib/common/scheduler.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdint.h> // uint32_t
+
+uint32_t pcmk__warnings = 0;
diff --git a/lib/common/schemas.c b/lib/common/schemas.c
index 88a3051..b3c09eb 100644
--- a/lib/common/schemas.c
+++ b/lib/common/schemas.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -432,34 +432,8 @@ crm_schema_init(void)
NULL, NULL, FALSE, -1);
}
-#if 0
-static void
-relaxng_invalid_stderr(void *userData, xmlErrorPtr error)
-{
- /*
- Structure xmlError
- struct _xmlError {
- int domain : What part of the library raised this er
- int code : The error code, e.g. an xmlParserError
- char * message : human-readable informative error messag
- xmlErrorLevel level : how consequent is the error
- char * file : the filename
- int line : the line number if available
- char * str1 : extra string information
- char * str2 : extra string information
- char * str3 : extra string information
- int int1 : extra number information
- int int2 : column number of the error or 0 if N/A
- void * ctxt : the parser context if available
- void * node : the node in the tree
- }
- */
- crm_err("Structured error: line=%d, level=%d %s", error->line, error->level, error->message);
-}
-#endif
-
static gboolean
-validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
+validate_with_relaxng(xmlDocPtr doc, xmlRelaxNGValidityErrorFunc error_handler, void *error_handler_context, const char *relaxng_file,
relaxng_ctx_cache_t **cached_ctx)
{
int rc = 0;
@@ -476,15 +450,14 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
crm_debug("Creating RNG parser context");
ctx = calloc(1, sizeof(relaxng_ctx_cache_t));
- xmlLoadExtDtdDefaultValue = 1;
ctx->parser = xmlRelaxNGNewParserCtxt(relaxng_file);
CRM_CHECK(ctx->parser != NULL, goto cleanup);
- if (to_logs) {
+ if (error_handler) {
xmlRelaxNGSetParserErrors(ctx->parser,
- (xmlRelaxNGValidityErrorFunc) xml_log,
- (xmlRelaxNGValidityWarningFunc) xml_log,
- GUINT_TO_POINTER(LOG_ERR));
+ (xmlRelaxNGValidityErrorFunc) error_handler,
+ (xmlRelaxNGValidityWarningFunc) error_handler,
+ error_handler_context);
} else {
xmlRelaxNGSetParserErrors(ctx->parser,
(xmlRelaxNGValidityErrorFunc) fprintf,
@@ -500,11 +473,11 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
ctx->valid = xmlRelaxNGNewValidCtxt(ctx->rng);
CRM_CHECK(ctx->valid != NULL, goto cleanup);
- if (to_logs) {
+ if (error_handler) {
xmlRelaxNGSetValidErrors(ctx->valid,
- (xmlRelaxNGValidityErrorFunc) xml_log,
- (xmlRelaxNGValidityWarningFunc) xml_log,
- GUINT_TO_POINTER(LOG_ERR));
+ (xmlRelaxNGValidityErrorFunc) error_handler,
+ (xmlRelaxNGValidityWarningFunc) error_handler,
+ error_handler_context);
} else {
xmlRelaxNGSetValidErrors(ctx->valid,
(xmlRelaxNGValidityErrorFunc) fprintf,
@@ -513,10 +486,6 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
}
}
- /* xmlRelaxNGSetValidStructuredErrors( */
- /* valid, relaxng_invalid_stderr, valid); */
-
- xmlLineNumbersDefault(1);
rc = xmlRelaxNGValidateDoc(ctx->valid, doc);
if (rc > 0) {
valid = FALSE;
@@ -590,39 +559,36 @@ crm_schema_cleanup(void)
}
static gboolean
-validate_with(xmlNode *xml, int method, gboolean to_logs)
+validate_with(xmlNode *xml, int method, xmlRelaxNGValidityErrorFunc error_handler, void* error_handler_context)
{
- xmlDocPtr doc = NULL;
gboolean valid = FALSE;
char *file = NULL;
+ struct schema_s *schema = NULL;
+ relaxng_ctx_cache_t **cache = NULL;
if (method < 0) {
return FALSE;
}
- if (known_schemas[method].validator == schema_validator_none) {
+ schema = &(known_schemas[method]);
+ if (schema->validator == schema_validator_none) {
return TRUE;
}
- CRM_CHECK(xml != NULL, return FALSE);
-
- if (pcmk__str_eq(known_schemas[method].name, "pacemaker-next",
- pcmk__str_none)) {
+ if (pcmk__str_eq(schema->name, "pacemaker-next", pcmk__str_none)) {
crm_warn("The pacemaker-next schema is deprecated and will be removed "
"in a future release.");
}
- doc = getDocPtr(xml);
file = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_rng,
- known_schemas[method].name);
+ schema->name);
crm_trace("Validating with %s (type=%d)",
- pcmk__s(file, "missing schema"), known_schemas[method].validator);
- switch (known_schemas[method].validator) {
+ pcmk__s(file, "missing schema"), schema->validator);
+ switch (schema->validator) {
case schema_validator_rng:
- valid =
- validate_with_relaxng(doc, to_logs, file,
- (relaxng_ctx_cache_t **) & (known_schemas[method].cache));
+ cache = (relaxng_ctx_cache_t **) &(schema->cache);
+ valid = validate_with_relaxng(xml->doc, error_handler, error_handler_context, file, cache);
break;
default:
crm_err("Unknown validator type: %d",
@@ -639,7 +605,7 @@ validate_with_silent(xmlNode *xml, int method)
{
bool rc, sl_backup = silent_logging;
silent_logging = TRUE;
- rc = validate_with(xml, method, TRUE);
+ rc = validate_with(xml, method, (xmlRelaxNGValidityErrorFunc) xml_log, GUINT_TO_POINTER(LOG_ERR));
silent_logging = sl_backup;
return rc;
}
@@ -676,7 +642,7 @@ dump_file(const char *filename)
}
gboolean
-validate_xml_verbose(xmlNode *xml_blob)
+validate_xml_verbose(const xmlNode *xml_blob)
{
int fd = 0;
xmlDoc *doc = NULL;
@@ -692,7 +658,7 @@ validate_xml_verbose(xmlNode *xml_blob)
dump_file(filename);
- doc = xmlParseFile(filename);
+ doc = xmlReadFile(filename, NULL, 0);
xml = xmlDocGetRootElement(doc);
rc = validate_xml(xml, NULL, FALSE);
free_xml(xml);
@@ -706,8 +672,16 @@ validate_xml_verbose(xmlNode *xml_blob)
gboolean
validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
{
+ return pcmk__validate_xml(xml_blob, validation, to_logs ? (xmlRelaxNGValidityErrorFunc) xml_log : NULL, GUINT_TO_POINTER(LOG_ERR));
+}
+
+gboolean
+pcmk__validate_xml(xmlNode *xml_blob, const char *validation, xmlRelaxNGValidityErrorFunc error_handler, void* error_handler_context)
+{
int version = 0;
+ CRM_CHECK((xml_blob != NULL) && (xml_blob->doc != NULL), return FALSE);
+
if (validation == NULL) {
validation = crm_element_value(xml_blob, XML_ATTR_VALIDATION);
}
@@ -717,7 +691,7 @@ validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
bool valid = FALSE;
for (lpc = 0; lpc < xml_schema_max; lpc++) {
- if (validate_with(xml_blob, lpc, FALSE)) {
+ if (validate_with(xml_blob, lpc, NULL, NULL)) {
valid = TRUE;
crm_xml_add(xml_blob, XML_ATTR_VALIDATION,
known_schemas[lpc].name);
@@ -735,7 +709,7 @@ validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
if (strcmp(validation, PCMK__VALUE_NONE) == 0) {
return TRUE;
} else if (version < xml_schema_max) {
- return validate_with(xml_blob, version, to_logs);
+ return validate_with(xml_blob, version, error_handler, error_handler_context);
}
crm_err("Unknown validator: %s", validation);
@@ -884,47 +858,17 @@ cib_upgrade_err(void *ctx, const char *fmt, ...)
va_end(ap);
}
-
-/* Denotes temporary emergency fix for "xmldiff'ing not text-node-ready";
- proper fix is most likely to teach __xml_diff_object and friends to
- deal with XML_TEXT_NODE (and more?), i.e., those nodes currently
- missing "_private" field (implicitly as NULL) which clashes with
- unchecked accesses (e.g. in __xml_offset) -- the outcome may be that
- those unexpected XML nodes will simply be ignored for the purpose of
- diff'ing, or it may be made more robust, or per the user's preference
- (which then may be exposed as crm_diff switch).
-
- Said XML_TEXT_NODE may appear unexpectedly due to how upgrade-2.10.xsl
- is arranged.
-
- The emergency fix is simple: reparse XSLT output with blank-ignoring
- parser. */
-#ifndef PCMK_SCHEMAS_EMERGENCY_XSLT
-#define PCMK_SCHEMAS_EMERGENCY_XSLT 1
-#endif
-
static xmlNode *
apply_transformation(xmlNode *xml, const char *transform, gboolean to_logs)
{
char *xform = NULL;
xmlNode *out = NULL;
xmlDocPtr res = NULL;
- xmlDocPtr doc = NULL;
xsltStylesheet *xslt = NULL;
-#if PCMK_SCHEMAS_EMERGENCY_XSLT != 0
- xmlChar *emergency_result;
- int emergency_txt_len;
- int emergency_res;
-#endif
-
- CRM_CHECK(xml != NULL, return FALSE);
- doc = getDocPtr(xml);
+
xform = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_xslt,
transform);
- xmlLoadExtDtdDefaultValue = 1;
- xmlSubstituteEntitiesDefault(1);
-
/* for capturing, e.g., what's emitted via <xsl:message> */
if (to_logs) {
xsltSetGenericErrorFunc(NULL, cib_upgrade_err);
@@ -935,22 +879,12 @@ apply_transformation(xmlNode *xml, const char *transform, gboolean to_logs)
xslt = xsltParseStylesheetFile((pcmkXmlStr) xform);
CRM_CHECK(xslt != NULL, goto cleanup);
- res = xsltApplyStylesheet(xslt, doc, NULL);
+ res = xsltApplyStylesheet(xslt, xml->doc, NULL);
CRM_CHECK(res != NULL, goto cleanup);
xsltSetGenericErrorFunc(NULL, NULL); /* restore default one */
-
-#if PCMK_SCHEMAS_EMERGENCY_XSLT != 0
- emergency_res = xsltSaveResultToString(&emergency_result,
- &emergency_txt_len, res, xslt);
- xmlFreeDoc(res);
- CRM_CHECK(emergency_res == 0, goto cleanup);
- out = string2xml((const char *) emergency_result);
- free(emergency_result);
-#else
out = xmlDocGetRootElement(res);
-#endif
cleanup:
if (xslt) {
@@ -1055,12 +989,15 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
int max_stable_schemas = xml_latest_schema_index();
int lpc = 0, match = -1, rc = pcmk_ok;
int next = -1; /* -1 denotes "inactive" value */
+ xmlRelaxNGValidityErrorFunc error_handler =
+ to_logs ? (xmlRelaxNGValidityErrorFunc) xml_log : NULL;
CRM_CHECK(best != NULL, return -EINVAL);
*best = 0;
- CRM_CHECK(xml_blob != NULL, return -EINVAL);
- CRM_CHECK(*xml_blob != NULL, return -EINVAL);
+ CRM_CHECK((xml_blob != NULL) && (*xml_blob != NULL)
+ && ((*xml_blob)->doc != NULL),
+ return -EINVAL);
xml = *xml_blob;
value = crm_element_value_copy(xml, XML_ATTR_VALIDATION);
@@ -1090,7 +1027,7 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
known_schemas[lpc].name ? known_schemas[lpc].name : "<unset>",
lpc, max_stable_schemas);
- if (validate_with(xml, lpc, to_logs) == FALSE) {
+ if (validate_with(xml, lpc, error_handler, GUINT_TO_POINTER(LOG_ERR)) == FALSE) {
if (next != -1) {
crm_info("Configuration not valid for schema: %s",
known_schemas[lpc].name);
@@ -1155,7 +1092,7 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
known_schemas[lpc].transform);
rc = -pcmk_err_transform_failed;
- } else if (validate_with(upgrade, next, to_logs)) {
+ } else if (validate_with(upgrade, next, error_handler, GUINT_TO_POINTER(LOG_ERR))) {
crm_info("Transformation %s.xsl successful",
known_schemas[lpc].transform);
lpc = next;
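pcmk__validate_xml() now accepts any libxml2 xmlRelaxNGValidityErrorFunc, so callers are no longer limited to logging. A sketch that collects errors into a GString instead; collect_error and validate_quietly are illustrative names, and passing NULL for validation falls back to the document's own validate-with attribute as shown above:

    // Handler matches libxml2's xmlRelaxNGValidityErrorFunc signature
    static void
    collect_error(void *ctx, const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        g_string_append_vprintf((GString *) ctx, fmt, ap);
        va_end(ap);
    }

    static gboolean
    validate_quietly(xmlNode *cib, GString *errors)
    {
        return pcmk__validate_xml(cib, NULL, collect_error, errors);
    }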
diff --git a/lib/common/strings.c b/lib/common/strings.c
index b245102..d9d2fda 100644
--- a/lib/common/strings.c
+++ b/lib/common/strings.c
@@ -417,10 +417,7 @@ crm_is_true(const char *s)
{
gboolean ret = FALSE;
- if (s != NULL) {
- crm_str_to_boolean(s, &ret);
- }
- return ret;
+ return (crm_str_to_boolean(s, &ret) < 0)? FALSE : ret;
}
int
@@ -768,12 +765,15 @@ pcmk__compress(const char *data, unsigned int length, unsigned int max,
*result_len = max;
rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length,
CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK);
+ rc = pcmk__bzlib2rc(rc);
+
free(uncompressed);
- if (rc != BZ_OK) {
- crm_err("Compression of %d bytes failed: %s " CRM_XS " bzerror=%d",
- length, bz2_strerror(rc), rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Compression of %d bytes failed: %s " CRM_XS " rc=%d",
+ length, pcmk_rc_str(rc), rc);
free(compressed);
- return pcmk_rc_error;
+ return rc;
}
#ifdef CLOCK_MONOTONIC
diff --git a/lib/common/tests/Makefile.am b/lib/common/tests/Makefile.am
index b147309..c0407e5 100644
--- a/lib/common/tests/Makefile.am
+++ b/lib/common/tests/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -9,6 +9,7 @@
SUBDIRS = \
acl \
+ actions \
agents \
cmdline \
flags \
@@ -17,7 +18,6 @@ SUBDIRS = \
iso8601 \
lists \
nvpair \
- operations \
options \
output \
results \
diff --git a/lib/common/tests/acl/Makefile.am b/lib/common/tests/acl/Makefile.am
index 50408f9..19903db 100644
--- a/lib/common/tests/acl/Makefile.am
+++ b/lib/common/tests/acl/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,10 +12,9 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__is_user_in_group_test \
- pcmk_acl_required_test \
- xml_acl_denied_test \
- xml_acl_enabled_test
+check_PROGRAMS = pcmk__is_user_in_group_test \
+ pcmk_acl_required_test \
+ xml_acl_denied_test \
+ xml_acl_enabled_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/operations/Makefile.am b/lib/common/tests/actions/Makefile.am
index 4687e1b..6890b84 100644
--- a/lib/common/tests/operations/Makefile.am
+++ b/lib/common/tests/actions/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,12 +11,12 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = copy_in_properties_test \
- expand_plus_plus_test \
- fix_plus_plus_recursive_test \
- parse_op_key_test \
- pcmk_is_probe_test \
- pcmk_xe_is_probe_test \
- pcmk_xe_mask_probe_failure_test
+check_PROGRAMS = copy_in_properties_test \
+ expand_plus_plus_test \
+ fix_plus_plus_recursive_test \
+ parse_op_key_test \
+ pcmk_is_probe_test \
+ pcmk_xe_is_probe_test \
+ pcmk_xe_mask_probe_failure_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/operations/copy_in_properties_test.c b/lib/common/tests/actions/copy_in_properties_test.c
index 7882551..7882551 100644
--- a/lib/common/tests/operations/copy_in_properties_test.c
+++ b/lib/common/tests/actions/copy_in_properties_test.c
diff --git a/lib/common/tests/operations/expand_plus_plus_test.c b/lib/common/tests/actions/expand_plus_plus_test.c
index 41471f9..41471f9 100644
--- a/lib/common/tests/operations/expand_plus_plus_test.c
+++ b/lib/common/tests/actions/expand_plus_plus_test.c
diff --git a/lib/common/tests/operations/fix_plus_plus_recursive_test.c b/lib/common/tests/actions/fix_plus_plus_recursive_test.c
index b3c7cc2..b3c7cc2 100644
--- a/lib/common/tests/operations/fix_plus_plus_recursive_test.c
+++ b/lib/common/tests/actions/fix_plus_plus_recursive_test.c
diff --git a/lib/common/tests/operations/parse_op_key_test.c b/lib/common/tests/actions/parse_op_key_test.c
index 1b1bfff..1b1bfff 100644
--- a/lib/common/tests/operations/parse_op_key_test.c
+++ b/lib/common/tests/actions/parse_op_key_test.c
diff --git a/lib/common/tests/operations/pcmk_is_probe_test.c b/lib/common/tests/actions/pcmk_is_probe_test.c
index 4a65e3f..4a65e3f 100644
--- a/lib/common/tests/operations/pcmk_is_probe_test.c
+++ b/lib/common/tests/actions/pcmk_is_probe_test.c
diff --git a/lib/common/tests/operations/pcmk_xe_is_probe_test.c b/lib/common/tests/actions/pcmk_xe_is_probe_test.c
index 62b21d9..62b21d9 100644
--- a/lib/common/tests/operations/pcmk_xe_is_probe_test.c
+++ b/lib/common/tests/actions/pcmk_xe_is_probe_test.c
diff --git a/lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c b/lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c
index 9e38019..9e38019 100644
--- a/lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c
+++ b/lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c
diff --git a/lib/common/tests/agents/Makefile.am b/lib/common/tests/agents/Makefile.am
index 7a54b7d..b3837d7 100644
--- a/lib/common/tests/agents/Makefile.am
+++ b/lib/common/tests/agents/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,10 +11,10 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = crm_generate_ra_key_test \
- crm_parse_agent_spec_test \
- pcmk__effective_rc_test \
- pcmk_get_ra_caps_test \
- pcmk_stonith_param_test
+check_PROGRAMS = crm_generate_ra_key_test \
+ crm_parse_agent_spec_test \
+ pcmk__effective_rc_test \
+ pcmk_get_ra_caps_test \
+ pcmk_stonith_param_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/agents/crm_parse_agent_spec_test.c b/lib/common/tests/agents/crm_parse_agent_spec_test.c
index cfd75f0..1d44459 100644
--- a/lib/common/tests/agents/crm_parse_agent_spec_test.c
+++ b/lib/common/tests/agents/crm_parse_agent_spec_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,14 +22,22 @@ all_params_null(void **state) {
static void
no_prov_or_type(void **state) {
- assert_int_equal(crm_parse_agent_spec("ocf", NULL, NULL, NULL), -EINVAL);
- assert_int_equal(crm_parse_agent_spec("ocf:", NULL, NULL, NULL), -EINVAL);
- assert_int_equal(crm_parse_agent_spec("ocf::", NULL, NULL, NULL), -EINVAL);
+ char *std = NULL;
+ char *prov = NULL;
+ char *ty = NULL;
+
+ assert_int_equal(crm_parse_agent_spec("ocf", &std, &prov, &ty), -EINVAL);
+ assert_int_equal(crm_parse_agent_spec("ocf:", &std, &prov, &ty), -EINVAL);
+ assert_int_equal(crm_parse_agent_spec("ocf::", &std, &prov, &ty), -EINVAL);
}
static void
no_type(void **state) {
- assert_int_equal(crm_parse_agent_spec("ocf:pacemaker:", NULL, NULL, NULL), -EINVAL);
+ char *std = NULL;
+ char *prov = NULL;
+ char *ty = NULL;
+
+ assert_int_equal(crm_parse_agent_spec("ocf:pacemaker:", &std, &prov, &ty), -EINVAL);
}
static void
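The updated tests pass real output pointers rather than NULL. A usage sketch mirroring that convention; the agent spec and wrapper name are illustrative, and success is assumed to return pcmk_ok as elsewhere in this legacy API:

    static void
    show_agent_parts(void)
    {
        char *std = NULL;
        char *prov = NULL;
        char *ty = NULL;

        if (crm_parse_agent_spec("ocf:pacemaker:Dummy", &std, &prov, &ty) == pcmk_ok) {
            crm_info("standard=%s provider=%s type=%s", std, prov, ty);
        }

        // Caller owns whatever was populated
        free(std);
        free(prov);
        free(ty);
    }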
diff --git a/lib/common/tests/cmdline/Makefile.am b/lib/common/tests/cmdline/Makefile.am
index d781ed5..792425b 100644
--- a/lib/common/tests/cmdline/Makefile.am
+++ b/lib/common/tests/cmdline/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,6 +12,7 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = pcmk__cmdline_preproc_test \
- pcmk__quote_cmdline_test
+ pcmk__new_common_args_test \
+ pcmk__quote_cmdline_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
index 863fbb9..299fec6 100644
--- a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
+++ b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -112,6 +112,16 @@ negative_score_2(void **state) {
}
static void
+negative_score_3(void **state) {
+ const char *argv[] = { "crm_attribute", "-p", "-v", "-INFINITY", NULL };
+ const gchar *expected[] = { "crm_attribute", "-p", "-v", "-INFINITY", NULL };
+
+ gchar **processed = pcmk__cmdline_preproc((char **) argv, "pv");
+ LISTS_EQ(processed, expected);
+ g_strfreev(processed);
+}
+
+static void
string_arg_with_dash(void **state) {
const char *argv[] = { "crm_mon", "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL };
const gchar *expected[] = { "crm_mon", "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL };
@@ -151,6 +161,7 @@ PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(long_arg),
cmocka_unit_test(negative_score),
cmocka_unit_test(negative_score_2),
+ cmocka_unit_test(negative_score_3),
cmocka_unit_test(string_arg_with_dash),
cmocka_unit_test(string_arg_with_dash_2),
cmocka_unit_test(string_arg_with_dash_3))
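The new negative_score_3 case suggests the second argument to pcmk__cmdline_preproc() names the short options whose values may legitimately begin with a dash (here -p and -v, so "-INFINITY" survives intact). A sketch of that reading; the wrapper name is illustrative:

    static gchar **
    preprocess_args(char **argv)
    {
        // "pv": values of -p and -v may start with '-' and must not be re-split
        gchar **processed = pcmk__cmdline_preproc(argv, "pv");

        return processed;   // caller releases with g_strfreev()
    }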
diff --git a/lib/common/tests/cmdline/pcmk__new_common_args_test.c b/lib/common/tests/cmdline/pcmk__new_common_args_test.c
new file mode 100644
index 0000000..6b70465
--- /dev/null
+++ b/lib/common/tests/cmdline/pcmk__new_common_args_test.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/common/cmdline_internal.h>
+
+#include "mock_private.h"
+
+#include <glib.h>
+
+static void
+calloc_fails(void **state)
+{
+ pcmk__assert_exits(CRM_EX_OSERR,
+ {
+ pcmk__mock_calloc = true; // calloc() will return NULL
+ expect_value(__wrap_calloc, nmemb, 1);
+ expect_value(__wrap_calloc, size, sizeof(pcmk__common_args_t));
+ pcmk__new_common_args("boring summary");
+ pcmk__mock_calloc = false; // Use real calloc()
+ }
+ );
+}
+
+static void
+strdup_fails(void **state)
+{
+ pcmk__assert_exits(CRM_EX_OSERR,
+ {
+ pcmk__mock_strdup = true; // strdup() will return NULL
+ expect_string(__wrap_strdup, s, "boring summary");
+ pcmk__new_common_args("boring summary");
+ pcmk__mock_strdup = false; // Use the real strdup()
+ }
+ );
+}
+
+static void
+success(void **state)
+{
+ pcmk__common_args_t *args = pcmk__new_common_args("boring summary");
+ assert_string_equal(args->summary, "boring summary");
+ assert_null(args->output_as_descr);
+ assert_false(args->version);
+ assert_false(args->quiet);
+ assert_int_equal(args->verbosity, 0);
+ assert_null(args->output_ty);
+ assert_null(args->output_dest);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(calloc_fails),
+ cmocka_unit_test(strdup_fails),
+ cmocka_unit_test(success))
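A sketch of how a tool might use the constructor this new test covers; only fields asserted above are relied on, and the summary text is illustrative. Allocation failures exit with CRM_EX_OSERR rather than returning NULL, per the calloc_fails and strdup_fails cases:

    static pcmk__common_args_t *
    init_args(void)
    {
        pcmk__common_args_t *args = pcmk__new_common_args("Example summary for a CLI tool");

        args->verbosity = 1;   // defaults to 0, as the success case asserts
        return args;
    }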
diff --git a/lib/common/tests/flags/Makefile.am b/lib/common/tests/flags/Makefile.am
index 16d8ffb..22a101a 100644
--- a/lib/common/tests/flags/Makefile.am
+++ b/lib/common/tests/flags/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,10 +11,9 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__clear_flags_as_test \
- pcmk__set_flags_as_test \
- pcmk_all_flags_set_test \
- pcmk_any_flags_set_test
+check_PROGRAMS = pcmk__clear_flags_as_test \
+ pcmk__set_flags_as_test \
+ pcmk_all_flags_set_test \
+ pcmk_any_flags_set_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/io/Makefile.am b/lib/common/tests/io/Makefile.am
index c26482c..f7519d8 100644
--- a/lib/common/tests/io/Makefile.am
+++ b/lib/common/tests/io/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,8 +11,7 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__full_path_test \
- pcmk__get_tmpdir_test
+check_PROGRAMS = pcmk__full_path_test \
+ pcmk__get_tmpdir_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/lists/Makefile.am b/lib/common/tests/lists/Makefile.am
index ae0c0b6..0fa1e15 100644
--- a/lib/common/tests/lists/Makefile.am
+++ b/lib/common/tests/lists/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,9 +12,8 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__list_of_1_test \
- pcmk__list_of_multiple_test \
- pcmk__subtract_lists_test
+check_PROGRAMS = pcmk__list_of_1_test \
+ pcmk__list_of_multiple_test \
+ pcmk__subtract_lists_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/nvpair/Makefile.am b/lib/common/tests/nvpair/Makefile.am
index 7acaba3..7f406bd 100644
--- a/lib/common/tests/nvpair/Makefile.am
+++ b/lib/common/tests/nvpair/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,8 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xe_attr_is_true_test \
- pcmk__xe_get_bool_attr_test \
- pcmk__xe_set_bool_attr_test
+check_PROGRAMS = pcmk__xe_attr_is_true_test \
+ pcmk__xe_get_bool_attr_test \
+ pcmk__xe_set_bool_attr_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/options/Makefile.am b/lib/common/tests/options/Makefile.am
index 9a5fa98..cc1008e 100644
--- a/lib/common/tests/options/Makefile.am
+++ b/lib/common/tests/options/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,9 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__env_option_test \
- pcmk__set_env_option_test \
- pcmk__env_option_enabled_test
+check_PROGRAMS = pcmk__env_option_test \
+ pcmk__set_env_option_test \
+ pcmk__env_option_enabled_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/options/pcmk__set_env_option_test.c b/lib/common/tests/options/pcmk__set_env_option_test.c
index 753bf74..22fd795 100644
--- a/lib/common/tests/options/pcmk__set_env_option_test.c
+++ b/lib/common/tests/options/pcmk__set_env_option_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -20,18 +20,18 @@ bad_input_string(void **state)
// Never call setenv()
pcmk__mock_setenv = true;
- pcmk__set_env_option(NULL, "new_value");
- pcmk__set_env_option("", "new_value");
- pcmk__set_env_option("name=val", "new_value");
+ pcmk__set_env_option(NULL, "new_value", true);
+ pcmk__set_env_option("", "new_value", true);
+ pcmk__set_env_option("name=val", "new_value", true);
pcmk__mock_setenv = false;
// Never call unsetenv()
pcmk__mock_unsetenv = true;
- pcmk__set_env_option(NULL, NULL);
- pcmk__set_env_option("", NULL);
- pcmk__set_env_option("name=val", NULL);
+ pcmk__set_env_option(NULL, NULL, true);
+ pcmk__set_env_option("", NULL, true);
+ pcmk__set_env_option("name=val", NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -53,11 +53,11 @@ input_too_long_for_both(void **state)
// Never call setenv() or unsetenv()
pcmk__mock_setenv = true;
- pcmk__set_env_option(long_opt, "new_value");
+ pcmk__set_env_option(long_opt, "new_value", true);
pcmk__mock_setenv = false;
pcmk__mock_unsetenv = true;
- pcmk__set_env_option(long_opt, NULL);
+ pcmk__set_env_option(long_opt, NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -87,7 +87,7 @@ input_too_long_for_pcmk(void **state)
expect_string(__wrap_setenv, value, "new_value");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option(long_opt, "new_value");
+ pcmk__set_env_option(long_opt, "new_value", true);
pcmk__mock_setenv = false;
@@ -96,7 +96,7 @@ input_too_long_for_pcmk(void **state)
expect_string(__wrap_unsetenv, name, buf);
will_return(__wrap_unsetenv, 0);
- pcmk__set_env_option(long_opt, NULL);
+ pcmk__set_env_option(long_opt, NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -115,7 +115,7 @@ valid_inputs_set(void **state)
expect_string(__wrap_setenv, value, "new_value");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option("env_var", "new_value");
+ pcmk__set_env_option("env_var", "new_value", true);
// Empty string is also a valid value
expect_string(__wrap_setenv, name, "PCMK_env_var");
@@ -126,7 +126,7 @@ valid_inputs_set(void **state)
expect_string(__wrap_setenv, value, "");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option("env_var", "");
+ pcmk__set_env_option("env_var", "", true);
pcmk__mock_setenv = false;
}
@@ -141,7 +141,33 @@ valid_inputs_unset(void **state)
will_return(__wrap_unsetenv, 0);
expect_string(__wrap_unsetenv, name, "HA_env_var");
will_return(__wrap_unsetenv, 0);
- pcmk__set_env_option("env_var", NULL);
+ pcmk__set_env_option("env_var", NULL, true);
+
+ pcmk__mock_unsetenv = false;
+}
+
+static void
+disable_compat(void **state)
+{
+ // Make sure we set only "PCMK_<option>" and not "HA_<option>"
+ pcmk__mock_setenv = true;
+
+ expect_string(__wrap_setenv, name, "PCMK_env_var");
+ expect_string(__wrap_setenv, value, "new_value");
+ expect_value(__wrap_setenv, overwrite, 1);
+ will_return(__wrap_setenv, 0);
+ pcmk__set_env_option("env_var", "new_value", false);
+
+ pcmk__mock_setenv = false;
+
+ // Make sure we clear both "PCMK_<option>" and "HA_<option>"
+ pcmk__mock_unsetenv = true;
+
+ expect_string(__wrap_unsetenv, name, "PCMK_env_var");
+ will_return(__wrap_unsetenv, 0);
+ expect_string(__wrap_unsetenv, name, "HA_env_var");
+ will_return(__wrap_unsetenv, 0);
+ pcmk__set_env_option("env_var", NULL, false);
pcmk__mock_unsetenv = false;
}
@@ -151,4 +177,5 @@ PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(input_too_long_for_both),
cmocka_unit_test(input_too_long_for_pcmk),
cmocka_unit_test(valid_inputs_set),
- cmocka_unit_test(valid_inputs_unset))
+ cmocka_unit_test(valid_inputs_unset),
+ cmocka_unit_test(disable_compat))
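A sketch of the new third argument, based on the comments in the tests above: compat=false writes only the PCMK_<option> form, while clearing removes the HA_<option> alias as well in either mode. "env_var" is the same placeholder name the tests use:

    static void
    set_then_clear(void)
    {
        // Write only PCMK_env_var (no HA_env_var alias)
        pcmk__set_env_option("env_var", "new_value", false);

        // Clearing removes both PCMK_env_var and HA_env_var regardless of compat
        pcmk__set_env_option("env_var", NULL, false);
    }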
diff --git a/lib/common/tests/output/Makefile.am b/lib/common/tests/output/Makefile.am
index 6ac7b5f..30f1494 100644
--- a/lib/common/tests/output/Makefile.am
+++ b/lib/common/tests/output/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,14 +11,14 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__call_message_test \
- pcmk__output_and_clear_error_test \
- pcmk__output_free_test \
- pcmk__output_new_test \
- pcmk__register_format_test \
- pcmk__register_formats_test \
- pcmk__register_message_test \
- pcmk__register_messages_test \
- pcmk__unregister_formats_test
+check_PROGRAMS = pcmk__call_message_test \
+ pcmk__output_and_clear_error_test \
+ pcmk__output_free_test \
+ pcmk__output_new_test \
+ pcmk__register_format_test \
+ pcmk__register_formats_test \
+ pcmk__register_message_test \
+ pcmk__register_messages_test \
+ pcmk__unregister_formats_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/output/pcmk__output_new_test.c b/lib/common/tests/output/pcmk__output_new_test.c
index de4268c..a05d9a7 100644
--- a/lib/common/tests/output/pcmk__output_new_test.c
+++ b/lib/common/tests/output/pcmk__output_new_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -95,9 +95,15 @@ fopen_fails(void **state) {
pcmk__output_t *out = NULL;
pcmk__mock_fopen = true;
+#if defined(HAVE_FOPEN64) && defined(_FILE_OFFSET_BITS) && (_FILE_OFFSET_BITS == 64) && (SIZEOF_LONG < 8)
+ expect_string(__wrap_fopen64, pathname, "destfile");
+ expect_string(__wrap_fopen64, mode, "w");
+ will_return(__wrap_fopen64, EPERM);
+#else
expect_string(__wrap_fopen, pathname, "destfile");
expect_string(__wrap_fopen, mode, "w");
will_return(__wrap_fopen, EPERM);
+#endif
assert_int_equal(pcmk__output_new(&out, "text", "destfile", NULL), EPERM);
diff --git a/lib/common/tests/results/Makefile.am b/lib/common/tests/results/Makefile.am
index 8d51d12..a7d5663 100644
--- a/lib/common/tests/results/Makefile.am
+++ b/lib/common/tests/results/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,6 +11,6 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__results_test
+check_PROGRAMS = pcmk__results_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/results/pcmk__results_test.c b/lib/common/tests/results/pcmk__results_test.c
index 53665d1..016eb7f 100644
--- a/lib/common/tests/results/pcmk__results_test.c
+++ b/lib/common/tests/results/pcmk__results_test.c
@@ -47,15 +47,9 @@ test_for_pcmk_rc2exitc(void **state) {
assert_int_equal(pcmk_rc2exitc(-7777777), CRM_EX_ERROR);
}
-static void
-test_for_bz2_strerror(void **state) {
- assert_string_equal(bz2_strerror(BZ_STREAM_END), "Ok");
-}
-
PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(test_for_pcmk_rc_name),
cmocka_unit_test(test_for_pcmk_rc_str),
cmocka_unit_test(test_for_crm_exit_name),
cmocka_unit_test(test_for_crm_exit_str),
- cmocka_unit_test(test_for_pcmk_rc2exitc),
- cmocka_unit_test(test_for_bz2_strerror))
+ cmocka_unit_test(test_for_pcmk_rc2exitc))
diff --git a/lib/common/tests/scores/Makefile.am b/lib/common/tests/scores/Makefile.am
index 66ca073..cb96155 100644
--- a/lib/common/tests/scores/Makefile.am
+++ b/lib/common/tests/scores/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,9 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- char2score_test \
- pcmk__add_scores_test \
- pcmk_readable_score_test
+check_PROGRAMS = char2score_test \
+ pcmk__add_scores_test \
+ pcmk_readable_score_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/scores/pcmk__add_scores_test.c b/lib/common/tests/scores/pcmk__add_scores_test.c
index 85ac232..1309659 100644
--- a/lib/common/tests/scores/pcmk__add_scores_test.c
+++ b/lib/common/tests/scores/pcmk__add_scores_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -53,6 +53,8 @@ result_infinite(void **state)
assert_int_equal(pcmk__add_scores(INT_MAX, INT_MAX), CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(INT_MIN, INT_MIN), -CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(2000000, 50), CRM_SCORE_INFINITY);
+ assert_int_equal(pcmk__add_scores(CRM_SCORE_INFINITY/2, CRM_SCORE_INFINITY/2), CRM_SCORE_INFINITY);
+ assert_int_equal(pcmk__add_scores(-CRM_SCORE_INFINITY/2, -CRM_SCORE_INFINITY/2), -CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(-4000000, 50), -CRM_SCORE_INFINITY);
}
diff --git a/lib/common/tests/strings/Makefile.am b/lib/common/tests/strings/Makefile.am
index 9abb8e9..e66af0d 100644
--- a/lib/common/tests/strings/Makefile.am
+++ b/lib/common/tests/strings/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,31 +11,31 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- crm_get_msec_test \
- crm_is_true_test \
- crm_str_to_boolean_test \
- pcmk__add_word_test \
- pcmk__btoa_test \
- pcmk__char_in_any_str_test \
- pcmk__compress_test \
- pcmk__ends_with_test \
- pcmk__g_strcat_test \
- pcmk__guint_from_hash_test \
- pcmk__numeric_strcasecmp_test \
- pcmk__parse_ll_range_test \
- pcmk__s_test \
- pcmk__scan_double_test \
- pcmk__scan_min_int_test \
- pcmk__scan_port_test \
- pcmk__starts_with_test \
- pcmk__str_any_of_test \
- pcmk__str_in_list_test \
- pcmk__str_table_dup_test \
- pcmk__str_update_test \
- pcmk__strcmp_test \
- pcmk__strkey_table_test \
- pcmk__strikey_table_test \
- pcmk__trim_test
+check_PROGRAMS = crm_get_msec_test \
+ crm_is_true_test \
+ crm_str_to_boolean_test \
+ pcmk__add_word_test \
+ pcmk__btoa_test \
+ pcmk__char_in_any_str_test \
+ pcmk__compress_test \
+ pcmk__ends_with_test \
+ pcmk__g_strcat_test \
+ pcmk__guint_from_hash_test \
+ pcmk__numeric_strcasecmp_test \
+ pcmk__parse_ll_range_test \
+ pcmk__s_test \
+ pcmk__scan_double_test \
+ pcmk__scan_ll_test \
+ pcmk__scan_min_int_test \
+ pcmk__scan_port_test \
+ pcmk__starts_with_test \
+ pcmk__str_any_of_test \
+ pcmk__str_in_list_test \
+ pcmk__str_table_dup_test \
+ pcmk__str_update_test \
+ pcmk__strcmp_test \
+ pcmk__strkey_table_test \
+ pcmk__strikey_table_test \
+ pcmk__trim_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/strings/pcmk__compress_test.c b/lib/common/tests/strings/pcmk__compress_test.c
index 7480937..7b59d9d 100644
--- a/lib/common/tests/strings/pcmk__compress_test.c
+++ b/lib/common/tests/strings/pcmk__compress_test.c
@@ -33,7 +33,7 @@ max_too_small(void **state)
char *result = calloc(1024, sizeof(char));
unsigned int len;
- assert_int_equal(pcmk__compress(SIMPLE_DATA, 40, 10, &result, &len), pcmk_rc_error);
+ assert_int_equal(pcmk__compress(SIMPLE_DATA, 40, 10, &result, &len), EFBIG);
}
static void
diff --git a/lib/common/tests/strings/pcmk__guint_from_hash_test.c b/lib/common/tests/strings/pcmk__guint_from_hash_test.c
index e2b4762..225c5b3 100644
--- a/lib/common/tests/strings/pcmk__guint_from_hash_test.c
+++ b/lib/common/tests/strings/pcmk__guint_from_hash_test.c
@@ -59,6 +59,7 @@ conversion_errors(void **state)
g_hash_table_insert(tbl, strdup("negative"), strdup("-3"));
g_hash_table_insert(tbl, strdup("toobig"), strdup("20000000000000000"));
+ g_hash_table_insert(tbl, strdup("baddata"), strdup("asdf"));
assert_int_equal(pcmk__guint_from_hash(tbl, "negative", 456, &result), ERANGE);
assert_int_equal(result, 456);
@@ -66,6 +67,9 @@ conversion_errors(void **state)
assert_int_equal(pcmk__guint_from_hash(tbl, "toobig", 456, &result), ERANGE);
assert_int_equal(result, 456);
+ assert_int_equal(pcmk__guint_from_hash(tbl, "baddata", 456, &result), EINVAL);
+ assert_int_equal(result, 456);
+
g_hash_table_destroy(tbl);
}
diff --git a/lib/common/tests/strings/pcmk__scan_ll_test.c b/lib/common/tests/strings/pcmk__scan_ll_test.c
new file mode 100644
index 0000000..645ecb4
--- /dev/null
+++ b/lib/common/tests/strings/pcmk__scan_ll_test.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+empty_input_string(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll(NULL, &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 47);
+}
+
+static void
+bad_input_string(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("asdf", &result, 47), EINVAL);
+ assert_int_equal(result, 47);
+ assert_int_equal(pcmk__scan_ll("as12", &result, 47), EINVAL);
+ assert_int_equal(result, 47);
+}
+
+static void
+trailing_chars(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("12as", &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 12);
+}
+
+static void
+no_result_variable(void **state)
+{
+ assert_int_equal(pcmk__scan_ll("1234", NULL, 47), pcmk_rc_ok);
+ assert_int_equal(pcmk__scan_ll("asdf", NULL, 47), EINVAL);
+}
+
+static void
+typical_case(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("1234", &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 1234);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(empty_input_string),
+ cmocka_unit_test(bad_input_string),
+ cmocka_unit_test(trailing_chars),
+ cmocka_unit_test(no_result_variable),
+ cmocka_unit_test(typical_case))
diff --git a/lib/common/tests/utils/Makefile.am b/lib/common/tests/utils/Makefile.am
index edccf09..f028ce4 100644
--- a/lib/common/tests/utils/Makefile.am
+++ b/lib/common/tests/utils/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,15 +11,17 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- compare_version_test \
- crm_meta_name_test \
- crm_meta_value_test \
- crm_user_lookup_test \
- pcmk_daemon_user_test \
- pcmk_str_is_infinity_test \
- pcmk_str_is_minus_infinity_test \
- pcmk__getpid_s_test
+check_PROGRAMS = compare_version_test \
+ crm_meta_name_test \
+ crm_meta_value_test \
+ crm_user_lookup_test \
+ pcmk_daemon_user_test \
+ pcmk_str_is_infinity_test \
+ pcmk_str_is_minus_infinity_test \
+ pcmk__fail_attr_name_test \
+ pcmk__failcount_name_test \
+ pcmk__getpid_s_test \
+ pcmk__lastfailure_name_test
if WRAPPABLE_UNAME
check_PROGRAMS += pcmk_hostname_test
diff --git a/lib/common/tests/utils/pcmk__fail_attr_name_test.c b/lib/common/tests/utils/pcmk__fail_attr_name_test.c
new file mode 100644
index 0000000..c6c25fc
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__fail_attr_name_test.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__fail_attr_name(NULL, NULL, NULL, 30000));
+ assert_null(pcmk__fail_attr_name(NULL, "myrsc", "monitor", 30000));
+ assert_null(pcmk__fail_attr_name("xyz", NULL, "monitor", 30000));
+ assert_null(pcmk__fail_attr_name("xyz", "myrsc", NULL, 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__fail_attr_name("xyz", "myrsc", "monitor", 30000),
+ "xyz-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
diff --git a/lib/common/tests/utils/pcmk__failcount_name_test.c b/lib/common/tests/utils/pcmk__failcount_name_test.c
new file mode 100644
index 0000000..a801f4d
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__failcount_name_test.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__failcount_name(NULL, NULL, 30000));
+ assert_null(pcmk__failcount_name("myrsc", NULL, 30000));
+ assert_null(pcmk__failcount_name(NULL, "monitor", 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__failcount_name("myrsc", "monitor", 30000),
+ "fail-count-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
diff --git a/lib/common/tests/utils/pcmk__lastfailure_name_test.c b/lib/common/tests/utils/pcmk__lastfailure_name_test.c
new file mode 100644
index 0000000..eab01f2
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__lastfailure_name_test.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__lastfailure_name(NULL, NULL, 30000));
+ assert_null(pcmk__lastfailure_name("myrsc", NULL, 30000));
+ assert_null(pcmk__lastfailure_name(NULL, "monitor", 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__lastfailure_name("myrsc", "monitor", 30000),
+ "last-failure-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
diff --git a/lib/common/tests/xml/Makefile.am b/lib/common/tests/xml/Makefile.am
index 0ccdcc3..465c950 100644
--- a/lib/common/tests/xml/Makefile.am
+++ b/lib/common/tests/xml/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,7 +11,7 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xe_foreach_child_test \
- pcmk__xe_match_test
+check_PROGRAMS = pcmk__xe_foreach_child_test \
+ pcmk__xe_match_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/xml/pcmk__xe_foreach_child_test.c b/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
index 9bcba87..ffb9171 100644
--- a/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
+++ b/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,7 +14,7 @@
static int compare_name_handler(xmlNode *xml, void *userdata) {
function_called();
- assert_string_equal((char *) userdata, crm_element_name(xml));
+ assert_string_equal((char *) userdata, (const char *) xml->name);
return pcmk_rc_ok;
}
@@ -140,7 +140,8 @@ const char *str3 =
static int any_of_handler(xmlNode *xml, void *userdata) {
function_called();
- assert_true(pcmk__str_any_of(crm_element_name(xml), "node1", "node2", "node3", NULL));
+ assert_true(pcmk__str_any_of((const char *) xml->name,
+ "node1", "node2", "node3", NULL));
return pcmk_rc_ok;
}
@@ -160,7 +161,7 @@ any_of_test(void **state) {
static int stops_on_first_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node1", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node1")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
@@ -170,7 +171,7 @@ static int stops_on_first_handler(xmlNode *xml, void *userdata) {
static int stops_on_second_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node2", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node2")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
@@ -180,7 +181,7 @@ static int stops_on_second_handler(xmlNode *xml, void *userdata) {
static int stops_on_third_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node3", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node3")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
diff --git a/lib/common/tests/xpath/Makefile.am b/lib/common/tests/xpath/Makefile.am
index 94abeee..d4c504b 100644
--- a/lib/common/tests/xpath/Makefile.am
+++ b/lib/common/tests/xpath/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,6 +11,6 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xpath_node_id_test
+check_PROGRAMS = pcmk__xpath_node_id_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c
index ff2d273..e569214 100644
--- a/lib/common/watchdog.c
+++ b/lib/common/watchdog.c
@@ -20,10 +20,6 @@
#include <dirent.h>
#include <signal.h>
-#ifdef _POSIX_MEMLOCK
-# include <sys/mman.h>
-#endif
-
static pid_t sbd_pid = 0;
static void
@@ -56,6 +52,7 @@ panic_local(void)
int rc = pcmk_ok;
uid_t uid = geteuid();
pid_t ppid = getppid();
+ const char *panic_action = pcmk__env_option(PCMK__ENV_PANIC_ACTION);
if(uid != 0 && ppid > 1) {
/* We're a non-root pacemaker daemon (pacemaker-based,
@@ -93,13 +90,15 @@ panic_local(void)
/* We're either pacemakerd, or a pacemaker daemon running as root */
- if (pcmk__str_eq("crash", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+ if (pcmk__str_eq(panic_action, "crash", pcmk__str_casei)) {
sysrq_trigger('c');
- } else if (pcmk__str_eq("sync-crash", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+
+ } else if (pcmk__str_eq(panic_action, "sync-crash", pcmk__str_casei)) {
sync();
sysrq_trigger('c');
+
} else {
- if (pcmk__str_eq("sync-reboot", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+ if (pcmk__str_eq(panic_action, "sync-reboot", pcmk__str_casei)) {
sync();
}
sysrq_trigger('b');
diff --git a/lib/common/xml.c b/lib/common/xml.c
index 22078ce..53ebff7 100644
--- a/lib/common/xml.c
+++ b/lib/common/xml.c
@@ -42,7 +42,8 @@
* parsing without XML_PARSE_RECOVER, and if that fails, try parsing again with
* it, logging a warning if it succeeds.
*/
-#define PCMK__XML_PARSE_OPTS (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER)
+#define PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER (XML_PARSE_NOBLANKS)
+#define PCMK__XML_PARSE_OPTS_WITH_RECOVER (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER)
bool
pcmk__tracking_xml_changes(xmlNode *xml, bool lazy)
@@ -85,8 +86,8 @@ pcmk__set_xml_doc_flag(xmlNode *xml, enum xml_private_flags flag)
}
// Mark document, element, and all element's parents as changed
-static inline void
-mark_xml_node_dirty(xmlNode *xml)
+void
+pcmk__mark_xml_node_dirty(xmlNode *xml)
{
pcmk__set_xml_doc_flag(xml, pcmk__xf_dirty);
set_parent_flag(xml, pcmk__xf_dirty);
@@ -114,12 +115,15 @@ void
pcmk__mark_xml_created(xmlNode *xml)
{
xmlNode *cIter = NULL;
- xml_node_private_t *nodepriv = xml->_private;
+ xml_node_private_t *nodepriv = NULL;
+
+ CRM_ASSERT(xml != NULL);
+ nodepriv = xml->_private;
if (nodepriv && pcmk__tracking_xml_changes(xml, FALSE)) {
if (!pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
pcmk__set_xml_flags(nodepriv, pcmk__xf_created);
- mark_xml_node_dirty(xml);
+ pcmk__mark_xml_node_dirty(xml);
}
for (cIter = pcmk__xml_first_child(xml); cIter != NULL;
cIter = pcmk__xml_next(cIter)) {
@@ -128,17 +132,6 @@ pcmk__mark_xml_created(xmlNode *xml)
}
}
-void
-pcmk__mark_xml_attr_dirty(xmlAttr *a)
-{
- xmlNode *parent = a->parent;
- xml_node_private_t *nodepriv = a->_private;
-
- pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_modified);
- pcmk__clear_xml_flags(nodepriv, pcmk__xf_deleted);
- mark_xml_node_dirty(parent);
-}
-
#define XML_DOC_PRIVATE_MAGIC 0x81726354UL
#define XML_NODE_PRIVATE_MAGIC 0x54637281UL
@@ -250,7 +243,7 @@ new_private_data(xmlNode *node)
/* XML_ELEMENT_NODE doesn't get picked up here, node->doc is
* not hooked up at the point we are called
*/
- mark_xml_node_dirty(node);
+ pcmk__mark_xml_node_dirty(node);
}
break;
}
@@ -321,19 +314,6 @@ pcmk__xml_position(const xmlNode *xml, enum xml_private_flags ignore_if_set)
return position;
}
-// This also clears attribute's flags if not marked as deleted
-static bool
-marked_as_deleted(xmlAttrPtr a, void *user_data)
-{
- xml_node_private_t *nodepriv = a->_private;
-
- if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- return true;
- }
- nodepriv->flags = pcmk__xf_none;
- return false;
-}
-
// Remove all attributes marked as deleted from an XML node
static void
accept_attr_deletions(xmlNode *xml)
@@ -342,7 +322,7 @@ accept_attr_deletions(xmlNode *xml)
((xml_node_private_t *) xml->_private)->flags = pcmk__xf_none;
// Remove this XML node's attributes that were marked as deleted
- pcmk__xe_remove_matching_attrs(xml, marked_as_deleted, NULL);
+ pcmk__xe_remove_matching_attrs(xml, pcmk__marked_as_deleted, NULL);
// Recursively do the same for this XML node's children
for (xmlNodePtr cIter = pcmk__xml_first_child(xml); cIter != NULL;
@@ -371,7 +351,7 @@ pcmk__xml_match(const xmlNode *haystack, const xmlNode *needle, bool exact)
const char *id = ID(needle);
const char *attr = (id == NULL)? NULL : XML_ATTR_ID;
- return pcmk__xe_match(haystack, crm_element_name(needle), attr, id);
+ return pcmk__xe_match(haystack, (const char *) needle->name, attr, id);
}
}
@@ -404,11 +384,7 @@ xmlNode *
find_xml_node(const xmlNode *root, const char *search_path, gboolean must_find)
{
xmlNode *a_child = NULL;
- const char *name = "NULL";
-
- if (root != NULL) {
- name = crm_element_name(root);
- }
+ const char *name = (root == NULL)? "<NULL>" : (const char *) root->name;
if (search_path == NULL) {
crm_warn("Will never find <NULL>");
@@ -418,7 +394,6 @@ find_xml_node(const xmlNode *root, const char *search_path, gboolean must_find)
for (a_child = pcmk__xml_first_child(root); a_child != NULL;
a_child = pcmk__xml_next(a_child)) {
if (strcmp((const char *)a_child->name, search_path) == 0) {
-/* crm_trace("returning node (%s).", crm_element_name(a_child)); */
return a_child;
}
}
@@ -473,7 +448,7 @@ pcmk__xe_match(const xmlNode *parent, const char *node_name,
(attr_n? attr_n : ""),
(attr_n? "=" : ""),
(attr_n? attr_v : ""),
- crm_element_name(parent));
+ (const char *) parent->name);
return NULL;
}
@@ -643,31 +618,17 @@ pcmk__xe_remove_matching_attrs(xmlNode *element,
}
}
-xmlDoc *
-getDocPtr(xmlNode * node)
-{
- xmlDoc *doc = NULL;
-
- CRM_CHECK(node != NULL, return NULL);
-
- doc = node->doc;
- if (doc == NULL) {
- doc = xmlNewDoc((pcmkXmlStr) "1.0");
- xmlDocSetRootElement(doc, node);
- xmlSetTreeDoc(node, doc);
- }
- return doc;
-}
-
xmlNode *
add_node_copy(xmlNode * parent, xmlNode * src_node)
{
xmlNode *child = NULL;
- xmlDoc *doc = getDocPtr(parent);
- CRM_CHECK(src_node != NULL, return NULL);
+ CRM_CHECK((parent != NULL) && (src_node != NULL), return NULL);
- child = xmlDocCopyNode(src_node, doc, 1);
+ child = xmlDocCopyNode(src_node, parent->doc, 1);
+ if (child == NULL) {
+ return NULL;
+ }
xmlAddChild(parent, child);
pcmk__mark_xml_created(child);
return child;
@@ -686,13 +647,22 @@ create_xml_node(xmlNode * parent, const char *name)
if (parent == NULL) {
doc = xmlNewDoc((pcmkXmlStr) "1.0");
+ if (doc == NULL) {
+ return NULL;
+ }
+
node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL);
+ if (node == NULL) {
+ xmlFreeDoc(doc);
+ return NULL;
+ }
xmlDocSetRootElement(doc, node);
} else {
- doc = getDocPtr(parent);
- node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL);
- xmlAddChild(parent, node);
+ node = xmlNewChild(parent, NULL, (pcmkXmlStr) name, NULL);
+ if (node == NULL) {
+ return NULL;
+ }
}
pcmk__mark_xml_created(node);
return node;
@@ -823,7 +793,6 @@ copy_xml(xmlNode * src)
CRM_ASSERT(copy != NULL);
xmlDocSetRootElement(doc, copy);
- xmlSetTreeDoc(copy, doc);
return copy;
}
@@ -833,7 +802,7 @@ string2xml(const char *input)
xmlNode *xml = NULL;
xmlDocPtr output = NULL;
xmlParserCtxtPtr ctxt = NULL;
- xmlErrorPtr last_error = NULL;
+ const xmlError *last_error = NULL;
if (input == NULL) {
crm_err("Can't parse NULL input");
@@ -847,7 +816,17 @@ string2xml(const char *input)
xmlCtxtResetLastError(ctxt);
xmlSetGenericErrorFunc(ctxt, pcmk__log_xmllib_err);
output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
+
if (output) {
xml = xmlDocGetRootElement(output);
}
@@ -933,9 +912,11 @@ decompress_file(const char *filename)
}
bz_file = BZ2_bzReadOpen(&rc, input, 0, 0, NULL, 0);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_err("Could not prepare to read compressed %s: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ CRM_XS " rc=%d", filename, pcmk_rc_str(rc), rc);
BZ2_bzReadClose(&rc, bz_file);
fclose(input);
return NULL;
@@ -957,9 +938,11 @@ decompress_file(const char *filename)
buffer[length] = '\0';
- if (rc != BZ_STREAM_END) {
- crm_err("Could not read compressed %s: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not read compressed %s: %s " CRM_XS " rc=%d",
+ filename, pcmk_rc_str(rc), rc);
free(buffer);
buffer = NULL;
}
@@ -1010,7 +993,7 @@ filename2xml(const char *filename)
xmlDocPtr output = NULL;
bool uncompressed = true;
xmlParserCtxtPtr ctxt = NULL;
- xmlErrorPtr last_error = NULL;
+ const xmlError *last_error = NULL;
/* create a parser context */
ctxt = xmlNewParserCtxt();
@@ -1026,16 +1009,45 @@ filename2xml(const char *filename)
if (pcmk__str_eq(filename, "-", pcmk__str_null_matches)) {
/* STDIN_FILENO == fileno(stdin) */
output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
} else if (uncompressed) {
- output = xmlCtxtReadFile(ctxt, filename, NULL, PCMK__XML_PARSE_OPTS);
+ output = xmlCtxtReadFile(ctxt, filename, NULL,
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadFile(ctxt, filename, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
} else {
char *input = decompress_file(filename);
output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
+
free(input);
}
@@ -1134,7 +1146,7 @@ crm_xml_set_id(xmlNode *xml, const char *format, ...)
* \internal
* \brief Write XML to a file stream
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file being written (for logging only)
* \param[in,out] stream Open file stream corresponding to filename
* \param[in] compress Whether to compress XML before writing
@@ -1143,18 +1155,18 @@ crm_xml_set_id(xmlNode *xml, const char *format, ...)
* \return Standard Pacemaker return code
*/
static int
-write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
+write_xml_stream(const xmlNode *xml, const char *filename, FILE *stream,
bool compress, unsigned int *nbytes)
{
int rc = pcmk_rc_ok;
char *buffer = NULL;
*nbytes = 0;
- crm_log_xml_trace(xml_node, "writing");
+ crm_log_xml_trace(xml, "writing");
- buffer = dump_xml_formatted(xml_node);
+ buffer = dump_xml_formatted(xml);
CRM_CHECK(buffer && strlen(buffer),
- crm_log_xml_warn(xml_node, "formatting failed");
+ crm_log_xml_warn(xml, "formatting failed");
rc = pcmk_rc_error;
goto bail);
@@ -1164,24 +1176,30 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
rc = BZ_OK;
bz_file = BZ2_bzWriteOpen(&rc, stream, 5, 0, 30);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not prepare file stream: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ CRM_XS " rc=%d", filename, pcmk_rc_str(rc), rc);
} else {
BZ2_bzWrite(&rc, bz_file, buffer, strlen(buffer));
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not compress data: %s "
- CRM_XS " bzerror=%d errno=%d",
- filename, bz2_strerror(rc), rc, errno);
+ CRM_XS " rc=%d errno=%d",
+ filename, pcmk_rc_str(rc), rc, errno);
}
}
- if (rc == BZ_OK) {
+ if (rc == pcmk_rc_ok) {
BZ2_bzWriteClose(&rc, bz_file, 0, &in, nbytes);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not write compressed data: %s "
- CRM_XS " bzerror=%d errno=%d",
- filename, bz2_strerror(rc), rc, errno);
+ CRM_XS " rc=%d errno=%d",
+ filename, pcmk_rc_str(rc), rc, errno);
*nbytes = 0; // retry without compression
} else {
crm_trace("Compressed XML for %s from %u bytes to %u",
@@ -1226,7 +1244,7 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
/*!
* \brief Write XML to a file descriptor
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file being written (for logging only)
* \param[in] fd Open file descriptor corresponding to filename
* \param[in] compress Whether to compress XML before writing
@@ -1234,18 +1252,19 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
* \return Number of bytes written on success, -errno otherwise
*/
int
-write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress)
+write_xml_fd(const xmlNode *xml, const char *filename, int fd,
+ gboolean compress)
{
FILE *stream = NULL;
unsigned int nbytes = 0;
int rc = pcmk_rc_ok;
- CRM_CHECK(xml_node && (fd > 0), return -EINVAL);
+ CRM_CHECK((xml != NULL) && (fd > 0), return -EINVAL);
stream = fdopen(fd, "w");
if (stream == NULL) {
return -errno;
}
- rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes);
+ rc = write_xml_stream(xml, filename, stream, compress, &nbytes);
if (rc != pcmk_rc_ok) {
return pcmk_rc2legacy(rc);
}
@@ -1255,25 +1274,25 @@ write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress
/*!
* \brief Write XML to a file
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file to write
* \param[in] compress Whether to compress XML before writing
*
* \return Number of bytes written on success, -errno otherwise
*/
int
-write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress)
+write_xml_file(const xmlNode *xml, const char *filename, gboolean compress)
{
FILE *stream = NULL;
unsigned int nbytes = 0;
int rc = pcmk_rc_ok;
- CRM_CHECK(xml_node && filename, return -EINVAL);
+ CRM_CHECK((xml != NULL) && (filename != NULL), return -EINVAL);
stream = fopen(filename, "w");
if (stream == NULL) {
return -errno;
}
- rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes);
+ rc = write_xml_stream(xml, filename, stream, compress, &nbytes);
if (rc != pcmk_rc_ok) {
return pcmk_rc2legacy(rc);
}
@@ -1382,37 +1401,6 @@ crm_xml_escape(const char *text)
/*!
* \internal
- * \brief Append an XML attribute to a buffer
- *
- * \param[in] attr Attribute to append
- * \param[in,out] buffer Where to append the content (must not be \p NULL)
- */
-static void
-dump_xml_attr(const xmlAttr *attr, GString *buffer)
-{
- char *p_value = NULL;
- const char *p_name = NULL;
- xml_node_private_t *nodepriv = NULL;
-
- if (attr == NULL || attr->children == NULL) {
- return;
- }
-
- nodepriv = attr->_private;
- if (nodepriv && pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- return;
- }
-
- p_name = (const char *) attr->name;
- p_value = crm_xml_escape((const char *)attr->children->content);
- pcmk__g_strcat(buffer, " ", p_name, "=\"", pcmk__s(p_value, "<null>"), "\"",
- NULL);
-
- free(p_value);
-}
-
-/*!
- * \internal
* \brief Append a string representation of an XML element to a buffer
*
* \param[in] data XML whose representation to append
@@ -1424,24 +1412,21 @@ static void
dump_xml_element(const xmlNode *data, uint32_t options, GString *buffer,
int depth)
{
- const char *name = crm_element_name(data);
bool pretty = pcmk_is_set(options, pcmk__xml_fmt_pretty);
bool filtered = pcmk_is_set(options, pcmk__xml_fmt_filtered);
int spaces = pretty? (2 * depth) : 0;
- CRM_ASSERT(name != NULL);
-
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "<", name, NULL);
+ pcmk__g_strcat(buffer, "<", data->name, NULL);
for (const xmlAttr *attr = pcmk__xe_first_attr(data); attr != NULL;
attr = attr->next) {
if (!filtered || !pcmk__xa_filterable((const char *) (attr->name))) {
- dump_xml_attr(attr, buffer);
+ pcmk__dump_xml_attr(attr, buffer);
}
}
@@ -1457,16 +1442,16 @@ dump_xml_element(const xmlNode *data, uint32_t options, GString *buffer,
}
if (data->children) {
- xmlNode *xChild = NULL;
- for(xChild = data->children; xChild != NULL; xChild = xChild->next) {
- pcmk__xml2text(xChild, options, buffer, depth + 1);
+ for (const xmlNode *child = data->children; child != NULL;
+ child = child->next) {
+ pcmk__xml2text(child, options, buffer, depth + 1);
}
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "</", name, ">", NULL);
+ pcmk__g_strcat(buffer, "</", data->name, ">", NULL);
if (pretty) {
g_string_append_c(buffer, '\n');
@@ -1559,7 +1544,45 @@ dump_xml_comment(const xmlNode *data, uint32_t options, GString *buffer,
}
}
-#define PCMK__XMLDUMP_STATS 0
+/*!
+ * \internal
+ * \brief Get a string representation of an XML element type
+ *
+ * \param[in] type XML element type
+ *
+ * \return String representation of \p type
+ */
+static const char *
+xml_element_type2str(xmlElementType type)
+{
+ static const char *const element_type_names[] = {
+ [XML_ELEMENT_NODE] = "element",
+ [XML_ATTRIBUTE_NODE] = "attribute",
+ [XML_TEXT_NODE] = "text",
+ [XML_CDATA_SECTION_NODE] = "CDATA section",
+ [XML_ENTITY_REF_NODE] = "entity reference",
+ [XML_ENTITY_NODE] = "entity",
+ [XML_PI_NODE] = "PI",
+ [XML_COMMENT_NODE] = "comment",
+ [XML_DOCUMENT_NODE] = "document",
+ [XML_DOCUMENT_TYPE_NODE] = "document type",
+ [XML_DOCUMENT_FRAG_NODE] = "document fragment",
+ [XML_NOTATION_NODE] = "notation",
+ [XML_HTML_DOCUMENT_NODE] = "HTML document",
+ [XML_DTD_NODE] = "DTD",
+ [XML_ELEMENT_DECL] = "element declaration",
+ [XML_ATTRIBUTE_DECL] = "attribute declaration",
+ [XML_ENTITY_DECL] = "entity declaration",
+ [XML_NAMESPACE_DECL] = "namespace declaration",
+ [XML_XINCLUDE_START] = "XInclude start",
+ [XML_XINCLUDE_END] = "XInclude end",
+ };
+
+ if ((type < 0) || (type >= PCMK__NELEM(element_type_names))) {
+ return "unrecognized type";
+ }
+ return element_type_names[type];
+}
/*!
* \internal
@@ -1571,7 +1594,8 @@ dump_xml_comment(const xmlNode *data, uint32_t options, GString *buffer,
* \param[in] depth Current indentation level
*/
void
-pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
+pcmk__xml2text(const xmlNode *data, uint32_t options, GString *buffer,
+ int depth)
{
if (data == NULL) {
crm_trace("Nothing to dump");
@@ -1581,60 +1605,6 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
CRM_ASSERT(buffer != NULL);
CRM_CHECK(depth >= 0, depth = 0);
- if (pcmk_is_set(options, pcmk__xml_fmt_full)) {
- /* libxml's serialization reuse is a good idea, sadly we cannot
- apply it for the filtered cases (preceding filtering pass
- would preclude further reuse of such in-situ modified XML
- in generic context and is likely not a win performance-wise),
- and there's also a historically unstable throughput argument
- (likely stemming from memory allocation overhead, eventhough
- that shall be minimized with defaults preset in crm_xml_init) */
-#if (PCMK__XMLDUMP_STATS - 0)
- time_t next, new = time(NULL);
-#endif
- xmlDoc *doc;
- xmlOutputBuffer *xml_buffer;
-
- doc = getDocPtr(data);
- /* doc will only be NULL if data is */
- CRM_CHECK(doc != NULL, return);
-
- xml_buffer = xmlAllocOutputBuffer(NULL);
- CRM_ASSERT(xml_buffer != NULL);
-
- /* XXX we could setup custom allocation scheme for the particular
- buffer, but it's subsumed with crm_xml_init that needs to
- be invoked prior to entering this function as such, since
- its other branch vitally depends on it -- what can be done
- about this all is to have a facade parsing functions that
- would 100% mark entering libxml code for us, since we don't
- do anything as crazy as swapping out the binary form of the
- parsed tree (but those would need to be strictly used as
- opposed to libxml's raw functions) */
-
- xmlNodeDumpOutput(xml_buffer, doc, data, 0,
- pcmk_is_set(options, pcmk__xml_fmt_pretty), NULL);
- /* attempt adding final NL - failing shouldn't be fatal here */
- (void) xmlOutputBufferWrite(xml_buffer, sizeof("\n") - 1, "\n");
- if (xml_buffer->buffer != NULL) {
- g_string_append(buffer,
- (const gchar *) xmlBufContent(xml_buffer->buffer));
- }
-
-#if (PCMK__XMLDUMP_STATS - 0)
- next = time(NULL);
- if ((now + 1) < next) {
- crm_log_xml_trace(data, "Long time");
- crm_err("xmlNodeDumpOutput() -> %lld bytes took %ds",
- (long long) buffer->len, next - now);
- }
-#endif
-
- /* asserted allocation before so there should be something to remove */
- (void) xmlOutputBufferClose(xml_buffer);
- return;
- }
-
switch(data->type) {
case XML_ELEMENT_NODE:
/* Handle below */
@@ -1642,11 +1612,6 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
break;
case XML_TEXT_NODE:
if (pcmk_is_set(options, pcmk__xml_fmt_text)) {
- /* @COMPAT: Remove when log_data_element() is removed. There are
- * no other internal code paths that set pcmk__xml_fmt_text.
- * Keep an empty case handler so that we don't log an unhandled
- * type warning.
- */
dump_xml_text(data, options, buffer, depth);
}
break;
@@ -1657,39 +1622,23 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
dump_xml_cdata(data, options, buffer, depth);
break;
default:
- crm_warn("Unhandled type: %d", data->type);
+ crm_warn("Cannot convert XML %s node to text " CRM_XS " type=%d",
+ xml_element_type2str(data->type), data->type);
break;
-
- /*
- XML_ATTRIBUTE_NODE = 2
- XML_ENTITY_REF_NODE = 5
- XML_ENTITY_NODE = 6
- XML_PI_NODE = 7
- XML_DOCUMENT_NODE = 9
- XML_DOCUMENT_TYPE_NODE = 10
- XML_DOCUMENT_FRAG_NODE = 11
- XML_NOTATION_NODE = 12
- XML_HTML_DOCUMENT_NODE = 13
- XML_DTD_NODE = 14
- XML_ELEMENT_DECL = 15
- XML_ATTRIBUTE_DECL = 16
- XML_ENTITY_DECL = 17
- XML_NAMESPACE_DECL = 18
- XML_XINCLUDE_START = 19
- XML_XINCLUDE_END = 20
- XML_DOCB_DOCUMENT_NODE = 21
- */
}
}
char *
-dump_xml_formatted_with_text(xmlNode * an_xml_node)
+dump_xml_formatted_with_text(const xmlNode *xml)
{
+ /* libxml's xmlNodeDumpOutput() would work here since we're not specifically
+ * filtering out any nodes. However, use pcmk__xml2text() for consistency,
+ * to escape attribute values, and to allow a const argument.
+ */
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, pcmk__xml_fmt_pretty|pcmk__xml_fmt_full,
- g_buffer, 0);
+ pcmk__xml2text(xml, pcmk__xml_fmt_pretty|pcmk__xml_fmt_text, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
@@ -1697,12 +1646,12 @@ dump_xml_formatted_with_text(xmlNode * an_xml_node)
}
char *
-dump_xml_formatted(xmlNode * an_xml_node)
+dump_xml_formatted(const xmlNode *xml)
{
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, pcmk__xml_fmt_pretty, g_buffer, 0);
+ pcmk__xml2text(xml, pcmk__xml_fmt_pretty, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
@@ -1710,30 +1659,46 @@ dump_xml_formatted(xmlNode * an_xml_node)
}
char *
-dump_xml_unformatted(xmlNode * an_xml_node)
+dump_xml_unformatted(const xmlNode *xml)
{
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, 0, g_buffer, 0);
+ pcmk__xml2text(xml, 0, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
return buffer;
}
-gboolean
-xml_has_children(const xmlNode * xml_root)
+int
+pcmk__xml2fd(int fd, xmlNode *cur)
{
- if (xml_root != NULL && xml_root->children != NULL) {
- return TRUE;
+ bool success;
+
+ xmlOutputBuffer *fd_out = xmlOutputBufferCreateFd(fd, NULL);
+ CRM_ASSERT(fd_out != NULL);
+ xmlNodeDumpOutput(fd_out, cur->doc, cur, 0, pcmk__xml_fmt_pretty, NULL);
+
+ success = xmlOutputBufferWrite(fd_out, sizeof("\n") - 1, "\n") != -1;
+
+ success = xmlOutputBufferClose(fd_out) != -1 && success;
+
+ if (!success) {
+ return EIO;
}
- return FALSE;
+
+ fsync(fd);
+ return pcmk_rc_ok;
}
void
xml_remove_prop(xmlNode * obj, const char *name)
{
+ if (crm_element_value(obj, name) == NULL) {
+ return;
+ }
+
if (pcmk__check_acl(obj, NULL, pcmk__xf_acl_write) == FALSE) {
crm_trace("Cannot remove %s from %s", name, obj->name);
@@ -1750,7 +1715,7 @@ xml_remove_prop(xmlNode * obj, const char *name)
}
void
-save_xml_to_file(xmlNode * xml, const char *desc, const char *filename)
+save_xml_to_file(const xmlNode *xml, const char *desc, const char *filename)
{
char *f = NULL;
@@ -1864,7 +1829,7 @@ mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr,
old_attr->name, p_old, p_new, element);
// Mark document, element, and all element's parents as changed
- mark_xml_node_dirty(new_xml);
+ pcmk__mark_xml_node_dirty(new_xml);
// Mark attribute as changed
pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_moved);
@@ -1886,10 +1851,10 @@ xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml)
xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml);
while (attr_iter != NULL) {
+ const char *name = (const char *) attr_iter->name;
xmlAttr *old_attr = attr_iter;
xmlAttr *new_attr = xmlHasProp(new_xml, attr_iter->name);
- const char *name = (const char *) attr_iter->name;
- const char *old_value = crm_element_value(old_xml, name);
+ const char *old_value = pcmk__xml_attr_value(attr_iter);
attr_iter = attr_iter->next;
if (new_attr == NULL) {
@@ -1943,7 +1908,7 @@ mark_created_attrs(xmlNode *new_xml)
const char *attr_name = (const char *) new_attr->name;
crm_trace("Created new attribute %s=%s in %s",
- attr_name, crm_element_value(new_xml, attr_name),
+ attr_name, pcmk__xml_attr_value(new_attr),
new_xml->name);
/* Check ACLs (we can't use the remove-then-create trick because it
@@ -2017,7 +1982,7 @@ mark_child_moved(xmlNode *old_child, xmlNode *new_parent, xmlNode *new_child,
crm_trace("Child element %s with id='%s' moved from position %d to %d under %s",
new_child->name, (ID(new_child)? ID(new_child) : "<no id>"),
p_old, p_new, new_parent->name);
- mark_xml_node_dirty(new_parent);
+ pcmk__mark_xml_node_dirty(new_parent);
pcmk__set_xml_flags(nodepriv, pcmk__xf_moved);
if (p_old > p_new) {
@@ -2102,9 +2067,10 @@ xml_calculate_significant_changes(xmlNode *old_xml, xmlNode *new_xml)
void
xml_calculate_changes(xmlNode *old_xml, xmlNode *new_xml)
{
- CRM_CHECK(pcmk__str_eq(crm_element_name(old_xml), crm_element_name(new_xml), pcmk__str_casei),
+ CRM_CHECK((old_xml != NULL) && (new_xml != NULL)
+ && pcmk__xe_is(old_xml, (const char *) new_xml->name)
+ && pcmk__str_eq(ID(old_xml), ID(new_xml), pcmk__str_none),
return);
- CRM_CHECK(pcmk__str_eq(ID(old_xml), ID(new_xml), pcmk__str_casei), return);
if(xml_tracking_changes(new_xml) == FALSE) {
xml_track_changes(new_xml, NULL, NULL, FALSE);
@@ -2118,10 +2084,13 @@ can_prune_leaf(xmlNode * xml_node)
{
xmlNode *cIter = NULL;
gboolean can_prune = TRUE;
- const char *name = crm_element_name(xml_node);
- if (pcmk__strcase_any_of(name, XML_TAG_RESOURCE_REF, XML_CIB_TAG_OBJ_REF,
- XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1, NULL)) {
+ CRM_CHECK(xml_node != NULL, return FALSE);
+
+ if (pcmk__strcase_any_of((const char *) xml_node->name,
+ XML_TAG_RESOURCE_REF, XML_CIB_TAG_OBJ_REF,
+ XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1,
+ NULL)) {
return FALSE;
}
@@ -2257,7 +2226,7 @@ pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update,
return;
}
- object_name = crm_element_name(update);
+ object_name = (const char *) update->name;
object_href_val = ID(update);
if (object_href_val != NULL) {
object_href = XML_ATTR_ID;
@@ -2294,9 +2263,7 @@ pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update,
#endif
}
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(update),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, (const char *) update->name), return);
if (as_diff == FALSE) {
/* So that expand_plus_plus() gets called */
@@ -2345,7 +2312,7 @@ update_xml_child(xmlNode * child, xmlNode * to_update)
CRM_CHECK(child != NULL, return FALSE);
CRM_CHECK(to_update != NULL, return FALSE);
- if (!pcmk__str_eq(crm_element_name(to_update), crm_element_name(child), pcmk__str_none)) {
+ if (!pcmk__xe_is(to_update, (const char *) child->name)) {
can_update = FALSE;
} else if (!pcmk__str_eq(ID(to_update), ID(child), pcmk__str_none)) {
@@ -2379,7 +2346,7 @@ find_xml_children(xmlNode ** children, xmlNode * root,
CRM_CHECK(root != NULL, return FALSE);
CRM_CHECK(children != NULL, return FALSE);
- if (tag != NULL && !pcmk__str_eq(tag, crm_element_name(root), pcmk__str_casei)) {
+ if ((tag != NULL) && !pcmk__xe_is(root, tag)) {
} else if (value != NULL && !pcmk__str_eq(value, crm_element_value(root, field), pcmk__str_casei)) {
@@ -2422,7 +2389,7 @@ replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean
if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) {
can_delete = TRUE;
}
- if (!pcmk__str_eq(crm_element_name(update), crm_element_name(child), pcmk__str_casei)) {
+ if (!pcmk__xe_is(update, (const char *) child->name)) {
can_delete = FALSE;
}
if (can_delete && delete_only) {
@@ -2444,23 +2411,23 @@ replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean
free_xml(child);
} else {
- xmlNode *tmp = copy_xml(update);
- xmlDoc *doc = tmp->doc;
- xmlNode *old = NULL;
+ xmlNode *old = child;
+ xmlNode *new = xmlCopyNode(update, 1);
- xml_accept_changes(tmp);
- old = xmlReplaceNode(child, tmp);
+ CRM_ASSERT(new != NULL);
- if(xml_tracking_changes(tmp)) {
- /* Replaced sections may have included relevant ACLs */
- pcmk__apply_acl(tmp);
- }
+ // May be unnecessary but avoids slight changes to some test outputs
+ reset_xml_node_flags(new);
- xml_calculate_changes(old, tmp);
- xmlDocSetRootElement(doc, old);
- free_xml(old);
+ old = xmlReplaceNode(old, new);
+
+ if (xml_tracking_changes(new)) {
+ // Replaced sections may have included relevant ACLs
+ pcmk__apply_acl(new);
+ }
+ xml_calculate_changes(old, new);
+ xmlFreeNode(old);
}
- child = NULL;
return TRUE;
} else if (can_delete) {
@@ -2491,14 +2458,10 @@ sorted_xml(xmlNode *input, xmlNode *parent, gboolean recursive)
xmlNode *child = NULL;
GSList *nvpairs = NULL;
xmlNode *result = NULL;
- const char *name = NULL;
CRM_CHECK(input != NULL, return NULL);
- name = crm_element_name(input);
- CRM_CHECK(name != NULL, return NULL);
-
- result = create_xml_node(parent, name);
+ result = create_xml_node(parent, (const char *) input->name);
nvpairs = pcmk_xml_attrs2nvpairs(input);
nvpairs = pcmk_sort_nvpairs(nvpairs);
pcmk_nvpairs2xml_attrs(nvpairs, result);
@@ -2547,10 +2510,9 @@ xmlNode *
crm_next_same_xml(const xmlNode *sibling)
{
xmlNode *match = pcmk__xe_next(sibling);
- const char *name = crm_element_name(sibling);
while (match != NULL) {
- if (!strcmp(crm_element_name(match), name)) {
+ if (pcmk__xe_is(match, (const char *) sibling->name)) {
return match;
}
match = pcmk__xe_next(match);
@@ -2592,7 +2554,6 @@ crm_xml_cleanup(void)
xmlNode *
expand_idref(xmlNode * input, xmlNode * top)
{
- const char *tag = NULL;
const char *ref = NULL;
xmlNode *result = input;
@@ -2603,12 +2564,10 @@ expand_idref(xmlNode * input, xmlNode * top)
top = input;
}
- tag = crm_element_name(result);
ref = crm_element_value(result, XML_ATTR_IDREF);
-
if (ref != NULL) {
char *xpath_string = crm_strdup_printf("//%s[@" XML_ATTR_ID "='%s']",
- tag, ref);
+ result->name, ref);
result = get_xpath_object(xpath_string, top, LOG_ERR);
if (result == NULL) {
@@ -2630,7 +2589,7 @@ pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns)
char *ret = NULL;
if (base == NULL) {
- base = getenv("PCMK_schema_directory");
+ base = pcmk__env_option(PCMK__ENV_SCHEMA_DIRECTORY);
}
if (pcmk__str_empty(base)) {
base = CRM_SCHEMA_DIRECTORY;
@@ -2741,6 +2700,21 @@ crm_destroy_xml(gpointer data)
free_xml(data);
}
+xmlDoc *
+getDocPtr(xmlNode *node)
+{
+ xmlDoc *doc = NULL;
+
+ CRM_CHECK(node != NULL, return NULL);
+
+ doc = node->doc;
+ if (doc == NULL) {
+ doc = xmlNewDoc((pcmkXmlStr) "1.0");
+ xmlDocSetRootElement(doc, node);
+ }
+ return doc;
+}
+
int
add_node_nocopy(xmlNode *parent, const char *name, xmlNode *child)
{
@@ -2749,5 +2723,14 @@ add_node_nocopy(xmlNode *parent, const char *name, xmlNode *child)
return 1;
}
+gboolean
+xml_has_children(const xmlNode * xml_root)
+{
+ if (xml_root != NULL && xml_root->children != NULL) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/common/xml_attr.c b/lib/common/xml_attr.c
new file mode 100644
index 0000000..427d267
--- /dev/null
+++ b/lib/common/xml_attr.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <time.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <bzlib.h>
+
+#include <libxml/parser.h>
+#include <libxml/tree.h>
+#include <libxml/xmlIO.h> /* xmlAllocOutputBuffer */
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h> // PCMK__XML_LOG_BASE, etc.
+#include "crmcommon_private.h"
+
+void
+pcmk__mark_xml_attr_dirty(xmlAttr *a)
+{
+ xmlNode *parent = a->parent;
+ xml_node_private_t *nodepriv = a->_private;
+
+ pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_modified);
+ pcmk__clear_xml_flags(nodepriv, pcmk__xf_deleted);
+ pcmk__mark_xml_node_dirty(parent);
+}
+
+// This also clears attribute's flags if not marked as deleted
+bool
+pcmk__marked_as_deleted(xmlAttrPtr a, void *user_data)
+{
+ xml_node_private_t *nodepriv = a->_private;
+
+ if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
+ return true;
+ }
+ nodepriv->flags = pcmk__xf_none;
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Append an XML attribute to a buffer
+ *
+ * \param[in] attr Attribute to append
+ * \param[in,out] buffer Where to append the content (must not be \p NULL)
+ */
+void
+pcmk__dump_xml_attr(const xmlAttr *attr, GString *buffer)
+{
+ char *p_value = NULL;
+ const char *p_name = NULL;
+ xml_node_private_t *nodepriv = NULL;
+
+ if (attr == NULL || attr->children == NULL) {
+ return;
+ }
+
+ nodepriv = attr->_private;
+ if (nodepriv && pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
+ return;
+ }
+
+ p_name = (const char *) attr->name;
+ p_value = crm_xml_escape((const char *)attr->children->content);
+ pcmk__g_strcat(buffer, " ", p_name, "=\"", pcmk__s(p_value, "<null>"), "\"",
+ NULL);
+
+ free(p_value);
+}
\ No newline at end of file
diff --git a/lib/common/xml_display.c b/lib/common/xml_display.c
index e2d46ce..18cd3b9 100644
--- a/lib/common/xml_display.c
+++ b/lib/common/xml_display.c
@@ -92,7 +92,6 @@ static int
show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
const xmlNode *data, int depth, uint32_t options)
{
- const char *name = crm_element_name(data);
int spaces = pcmk_is_set(options, pcmk__xml_fmt_pretty)? (2 * depth) : 0;
int rc = pcmk_rc_no_output;
@@ -104,7 +103,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "<", name, NULL);
+ pcmk__g_strcat(buffer, "<", data->name, NULL);
for (const xmlAttr *attr = pcmk__xe_first_attr(data); attr != NULL;
attr = attr->next) {
@@ -138,7 +137,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
free(p_copy);
}
- if (xml_has_children(data)
+ if ((data->children != NULL)
&& pcmk_is_set(options, pcmk__xml_fmt_children)) {
g_string_append_c(buffer, '>');
@@ -151,7 +150,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
buffer->str);
}
- if (!xml_has_children(data)) {
+ if (data->children == NULL) {
return rc;
}
@@ -171,7 +170,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
int temp_rc = out->info(out, "%s%s%*s</%s>",
pcmk__s(prefix, ""),
pcmk__str_empty(prefix)? "" : " ",
- spaces, "", name);
+ spaces, "", data->name);
rc = pcmk__output_select_rc(rc, temp_rc);
}
@@ -304,14 +303,14 @@ show_xml_changes_recursive(pcmk__output_t *out, const xmlNode *data, int depth,
nodepriv = attr->_private;
if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- const char *value = crm_element_value(data, name);
+ const char *value = pcmk__xml_attr_value(attr);
temp_rc = out->info(out, "%s %*s @%s=%s",
PCMK__XML_PREFIX_DELETED, spaces, "", name,
value);
} else if (pcmk_is_set(nodepriv->flags, pcmk__xf_dirty)) {
- const char *value = crm_element_value(data, name);
+ const char *value = pcmk__xml_attr_value(attr);
if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
prefix = PCMK__XML_PREFIX_CREATED;
@@ -447,9 +446,6 @@ log_data_element(int log_level, const char *file, const char *function,
if (pcmk_is_set(legacy_options, xml_log_option_formatted)) {
options |= pcmk__xml_fmt_pretty;
}
- if (pcmk_is_set(legacy_options, xml_log_option_full_fledged)) {
- options |= pcmk__xml_fmt_full;
- }
if (pcmk_is_set(legacy_options, xml_log_option_open)) {
options |= pcmk__xml_fmt_open;
}
@@ -480,7 +476,7 @@ log_data_element(int log_level, const char *file, const char *function,
}
if (pcmk_is_set(options, pcmk__xml_fmt_pretty)
- && (!xml_has_children(data)
+ && ((data->children == NULL)
|| (crm_element_value(data, XML_DIFF_MARKER) != NULL))) {
if (pcmk_is_set(options, pcmk__xml_fmt_diff_plus)) {
diff --git a/lib/common/xpath.c b/lib/common/xpath.c
index 1f5c0a8..d90f1c5 100644
--- a/lib/common/xpath.c
+++ b/lib/common/xpath.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -136,9 +136,8 @@ dedupXpathResults(xmlXPathObjectPtr xpathObj)
/* the caller needs to check if the result contains a xmlDocPtr or xmlNodePtr */
xmlXPathObjectPtr
-xpath_search(xmlNode * xml_top, const char *path)
+xpath_search(const xmlNode *xml_top, const char *path)
{
- xmlDocPtr doc = NULL;
xmlXPathObjectPtr xpathObj = NULL;
xmlXPathContextPtr xpathCtx = NULL;
const xmlChar *xpathExpr = (pcmkXmlStr) path;
@@ -147,9 +146,7 @@ xpath_search(xmlNode * xml_top, const char *path)
CRM_CHECK(xml_top != NULL, return NULL);
CRM_CHECK(strlen(path) > 0, return NULL);
- doc = getDocPtr(xml_top);
-
- xpathCtx = xmlXPathNewContext(doc);
+ xpathCtx = xmlXPathNewContext(xml_top->doc);
CRM_ASSERT(xpathCtx != NULL);
xpathObj = xmlXPathEvalExpression(xpathExpr, xpathCtx);
@@ -298,9 +295,9 @@ pcmk__element_xpath(const xmlNode *xml)
if (parent == NULL) {
g_string_append_c(xpath, '/');
} else if (parent->parent == NULL) {
- g_string_append(xpath, TYPE(xml));
+ g_string_append(xpath, (const gchar *) xml->name);
} else {
- pcmk__g_strcat(xpath, "/", TYPE(xml), NULL);
+ pcmk__g_strcat(xpath, "/", (const char *) xml->name, NULL);
}
id = ID(xml);
diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am
index a72b7d6..5302035 100644
--- a/lib/fencing/Makefile.am
+++ b/lib/fencing/Makefile.am
@@ -14,15 +14,19 @@ noinst_HEADERS = fencing_private.h
lib_LTLIBRARIES = libstonithd.la
-libstonithd_la_LDFLAGS = -version-info 34:3:8
+libstonithd_la_LDFLAGS = -version-info 34:4:8
libstonithd_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libstonithd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-libstonithd_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+libstonithd_la_LIBADD = $(top_builddir)/lib/services/libcrmservice.la
+libstonithd_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
-libstonithd_la_SOURCES = st_actions.c st_client.c st_output.c st_rhcs.c
+## Library sources (*must* use += format for bumplibs)
+libstonithd_la_SOURCES = st_actions.c
+libstonithd_la_SOURCES += st_client.c
if BUILD_LHA_SUPPORT
libstonithd_la_SOURCES += st_lha.c
endif
+libstonithd_la_SOURCES += st_output.c
+libstonithd_la_SOURCES += st_rhcs.c
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index e2783d5..1d32cc1 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -515,7 +515,7 @@ stonith_api_device_metadata(stonith_t *stonith, int call_options,
enum stonith_namespace ns = stonith_get_namespace(agent, namespace);
if (timeout_sec <= 0) {
- timeout_sec = CRMD_METADATA_CALL_TIMEOUT;
+ timeout_sec = PCMK_DEFAULT_METADATA_TIMEOUT_MS;
}
crm_trace("Looking up metadata for %s agent %s",
@@ -553,7 +553,7 @@ stonith_api_query(stonith_t * stonith, int call_options, const char *target,
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __func__);
crm_xml_add(data, F_STONITH_TARGET, target);
- crm_xml_add(data, F_STONITH_ACTION, "off");
+ crm_xml_add(data, F_STONITH_ACTION, PCMK_ACTION_OFF);
rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
if (rc < 0) {
@@ -625,7 +625,8 @@ stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **l
int rc;
xmlNode *output = NULL;
- rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
+ rc = stonith_api_call(stonith, call_options, id, PCMK_ACTION_LIST, NULL,
+ timeout, &output);
if (output && list_info) {
const char *list_str;
@@ -647,14 +648,16 @@ stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **l
static int
stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
{
- return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
+ return stonith_api_call(stonith, call_options, id, PCMK_ACTION_MONITOR,
+ NULL, timeout, NULL);
}
static int
stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
int timeout)
{
- return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
+ return stonith_api_call(stonith, call_options, id, PCMK_ACTION_STATUS, port,
+ timeout, NULL);
}
static int
@@ -689,7 +692,8 @@ static int
stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
{
stonith__set_call_options(call_options, target, st_opt_manual_ack);
- return stonith_api_fence(stonith, call_options, target, "off", 0, 0);
+ return stonith_api_fence(stonith, call_options, target, PCMK_ACTION_OFF, 0,
+ 0);
}
static int
@@ -1105,13 +1109,20 @@ stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
if (stonith_fd) {
/* No mainloop */
native->ipc = crm_ipc_new("stonith-ng", 0);
-
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *stonith_fd = crm_ipc_get_fd(native->ipc);
- } else if (native->ipc) {
- crm_ipc_close(native->ipc);
- crm_ipc_destroy(native->ipc);
- native->ipc = NULL;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, stonith_fd);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Couldn't get file descriptor for IPC: %s",
+ pcmk_rc_str(rc));
+ }
+ }
+ if (rc != pcmk_rc_ok) {
+ crm_ipc_close(native->ipc);
+ crm_ipc_destroy(native->ipc);
+ native->ipc = NULL;
+ }
}
} else {
@@ -1765,7 +1776,7 @@ stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
}
if (timeout_sec <= 0) {
- timeout_sec = CRMD_METADATA_CALL_TIMEOUT; // Questionable
+ timeout_sec = PCMK_DEFAULT_METADATA_TIMEOUT_MS; // Questionable
}
switch (stonith_get_namespace(agent, namespace_s)) {
@@ -1961,7 +1972,7 @@ stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off)
{
int rc = pcmk_ok;
stonith_t *st = stonith_api_new();
- const char *action = off? "off" : "reboot";
+ const char *action = off? PCMK_ACTION_OFF : PCMK_ACTION_REBOOT;
api_log_open();
if (st == NULL) {
@@ -2098,9 +2109,9 @@ stonith_action_str(const char *action)
{
if (action == NULL) {
return "fencing";
- } else if (!strcmp(action, "on")) {
+ } else if (strcmp(action, PCMK_ACTION_ON) == 0) {
return "unfencing";
- } else if (!strcmp(action, "off")) {
+ } else if (strcmp(action, PCMK_ACTION_OFF) == 0) {
return "turning off";
} else {
return action;
@@ -2160,7 +2171,8 @@ parse_list_line(const char *line, int len, GList **output)
line + entry_start, entry_start, i);
free(entry);
- } else if (pcmk__strcase_any_of(entry, "on", "off", NULL)) {
+ } else if (pcmk__strcase_any_of(entry, PCMK_ACTION_ON,
+ PCMK_ACTION_OFF, NULL)) {
/* Some agents print the target status in the list output,
* though none are known now (the separate list-status command
* is used for this, but it can also print "UNKNOWN"). To handle
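
Beyond the switch to PCMK_ACTION_* constants, the signon hunk above replaces the old boolean crm_ipc_connect() check with the rc-returning pcmk__connect_generic_ipc()/pcmk__ipc_fd() pair and funnels every failure into a single cleanup path. The sketch below mirrors only that control-flow shape in plain C; connect_ipc(), get_ipc_fd() and the other names are illustrative placeholders, not Pacemaker or libqb APIs:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct ipc_conn { int connected; int fd; } ipc_conn_t;

    static ipc_conn_t *ipc_new(void) { return calloc(1, sizeof(ipc_conn_t)); }
    static void ipc_destroy(ipc_conn_t *c) { free(c); }

    /* Placeholder for an rc-returning connect: 0 on success */
    static int connect_ipc(ipc_conn_t *c) { c->connected = 1; c->fd = 3; return 0; }

    /* Placeholder for fetching the descriptor via an output pointer */
    static int get_ipc_fd(const ipc_conn_t *c, int *fd)
    {
        if (!c->connected) {
            return 107;             /* ENOTCONN-like failure code */
        }
        *fd = c->fd;
        return 0;
    }

    int
    main(void)
    {
        int fd = -1;
        int rc = 0;
        ipc_conn_t *ipc = ipc_new();

        if (ipc != NULL) {
            rc = connect_ipc(ipc);
            if (rc == 0) {
                rc = get_ipc_fd(ipc, &fd);
                if (rc != 0) {
                    fprintf(stderr, "Couldn't get IPC file descriptor: %d\n", rc);
                }
            }
            if (rc != 0) {
                /* Any failure after allocation goes through one teardown
                 * path, as in the updated stonith_api_signon() */
                ipc_destroy(ipc);
                ipc = NULL;
            }
        }

        printf("rc=%d fd=%d\n", rc, fd);
        ipc_destroy(ipc);   /* free(NULL) is a no-op, so safe either way */
        return rc;
    }
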
diff --git a/lib/fencing/st_lha.c b/lib/fencing/st_lha.c
index d477ded..fd26217 100644
--- a/lib/fencing/st_lha.c
+++ b/lib/fencing/st_lha.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -41,10 +41,10 @@ static const char META_TEMPLATE[] =
" <shortdesc lang=\"en\">%s</shortdesc>\n"
"%s\n"
" <actions>\n"
- " <action name=\"start\" timeout=\"20\" />\n"
+ " <action name=\"start\" timeout=\"%s\" />\n"
" <action name=\"stop\" timeout=\"15\" />\n"
- " <action name=\"status\" timeout=\"20\" />\n"
- " <action name=\"monitor\" timeout=\"20\" interval=\"3600\"/>\n"
+ " <action name=\"status\" timeout=\"%s\" />\n"
+ " <action name=\"monitor\" timeout=\"%s\" interval=\"3600\"/>\n"
" <action name=\"meta-data\" timeout=\"15\" />\n"
" </actions>\n"
" <special tag=\"heartbeat\">\n"
@@ -200,6 +200,7 @@ stonith__lha_metadata(const char *agent, int timeout, char **output)
char *meta_param = NULL;
char *meta_longdesc = NULL;
char *meta_shortdesc = NULL;
+ const char *timeout_str = NULL;
stonith_obj = (*st_new_fn) (agent);
if (stonith_obj) {
@@ -236,8 +237,10 @@ stonith__lha_metadata(const char *agent, int timeout, char **output)
xml_meta_shortdesc =
(char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc);
+ timeout_str = pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS);
buffer = crm_strdup_printf(META_TEMPLATE, agent, xml_meta_longdesc,
- xml_meta_shortdesc, meta_param);
+ xml_meta_shortdesc, meta_param,
+ timeout_str, timeout_str, timeout_str);
xmlFree(xml_meta_longdesc);
xmlFree(xml_meta_shortdesc);
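
The st_lha.c hunks turn the hard-coded 20-second timeouts in META_TEMPLATE into %s placeholders filled from pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS), with the same string passed once per placeholder. A standalone sketch of that printf-style substitution, where "20s" is only a stand-in for the real default:

    #include <stdio.h>

    static const char META_ACTIONS[] =
        "  <actions>\n"
        "    <action name=\"start\" timeout=\"%s\" />\n"
        "    <action name=\"stop\" timeout=\"15\" />\n"
        "    <action name=\"status\" timeout=\"%s\" />\n"
        "    <action name=\"monitor\" timeout=\"%s\" interval=\"3600\"/>\n"
        "  </actions>\n";

    int
    main(void)
    {
        const char *timeout_str = "20s";    /* illustrative default only */
        char buffer[512];

        /* One trailing argument per placeholder, exactly like the three
         * timeout_str arguments added to the crm_strdup_printf() call */
        snprintf(buffer, sizeof(buffer), META_ACTIONS,
                 timeout_str, timeout_str, timeout_str);
        fputs(buffer, stdout);
        return 0;
    }
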
diff --git a/lib/fencing/st_rhcs.c b/lib/fencing/st_rhcs.c
index ec80793..854d333 100644
--- a/lib/fencing/st_rhcs.c
+++ b/lib/fencing/st_rhcs.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -180,14 +180,17 @@ stonith__rhcs_get_metadata(const char *agent, int timeout_sec,
xpathObj = xpath_search(xml, "//action[@name='stop']");
if (numXpathResults(xpathObj) <= 0) {
xmlNode *tmp = NULL;
+ const char *timeout_str = NULL;
+
+ timeout_str = pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS);
tmp = create_xml_node(actions, "action");
- crm_xml_add(tmp, "name", "stop");
- crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S);
+ crm_xml_add(tmp, "name", PCMK_ACTION_STOP);
+ crm_xml_add(tmp, "timeout", timeout_str);
tmp = create_xml_node(actions, "action");
- crm_xml_add(tmp, "name", "start");
- crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S);
+ crm_xml_add(tmp, "name", PCMK_ACTION_START);
+ crm_xml_add(tmp, "timeout", timeout_str);
}
freeXpathObject(xpathObj);
@@ -292,7 +295,7 @@ stonith__rhcs_validate(stonith_t *st, int call_options, const char *target,
host_arg = NULL;
}
- action = stonith__action_create(agent, "validate-all", target, 0,
+ action = stonith__action_create(agent, PCMK_ACTION_VALIDATE_ALL, target, 0,
remaining_timeout, params, NULL, host_arg);
rc = stonith__execute(action);
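
The st_rhcs.c hunk above injects default stop/start actions into agent metadata when they are missing, via the Pacemaker-internal create_xml_node()/crm_xml_add() wrappers. A rough raw-libxml2 equivalent of building one such <action> element (the "20s" timeout is again only a stand-in):

    #include <stdio.h>
    #include <libxml/tree.h>

    int
    main(void)
    {
        xmlDocPtr doc = xmlNewDoc(BAD_CAST "1.0");
        xmlNodePtr agent = xmlNewNode(NULL, BAD_CAST "resource-agent");
        xmlNodePtr actions = xmlNewChild(agent, NULL, BAD_CAST "actions", NULL);
        xmlNodePtr action = xmlNewChild(actions, NULL, BAD_CAST "action", NULL);

        xmlDocSetRootElement(doc, agent);

        /* Equivalent in spirit to crm_xml_add(tmp, "name", PCMK_ACTION_STOP)
         * and crm_xml_add(tmp, "timeout", timeout_str) */
        xmlNewProp(action, BAD_CAST "name", BAD_CAST "stop");
        xmlNewProp(action, BAD_CAST "timeout", BAD_CAST "20s");

        xmlDocFormatDump(stdout, doc, 1);
        xmlFreeDoc(doc);
        return 0;
    }
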
diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am
index e9ac906..a9b9c67 100644
--- a/lib/lrmd/Makefile.am
+++ b/lib/lrmd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2020 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,12 +10,17 @@ include $(top_srcdir)/mk/common.mk
lib_LTLIBRARIES = liblrmd.la
-liblrmd_la_LDFLAGS = -version-info 29:6:1
+liblrmd_la_LDFLAGS = -version-info 30:0:2
liblrmd_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
liblrmd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/fencing/libstonithd.la
-liblrmd_la_SOURCES = lrmd_client.c proxy_common.c lrmd_alerts.c lrmd_output.c
+liblrmd_la_LIBADD = $(top_builddir)/lib/fencing/libstonithd.la
+liblrmd_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+liblrmd_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
+
+## Library sources (*must* use += format for bumplibs)
+liblrmd_la_SOURCES = lrmd_alerts.c
+liblrmd_la_SOURCES += lrmd_client.c
+liblrmd_la_SOURCES += lrmd_output.c
+liblrmd_la_SOURCES += proxy_common.c
diff --git a/lib/lrmd/lrmd_alerts.c b/lib/lrmd/lrmd_alerts.c
index 588ff97..2a8c988 100644
--- a/lib/lrmd/lrmd_alerts.c
+++ b/lib/lrmd/lrmd_alerts.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -355,7 +355,7 @@ lrmd_send_resource_alert(lrmd_t *lrmd, const GList *alert_list,
target_rc = rsc_op_expected_rc(op);
if ((op->interval_ms == 0) && (target_rc == op->rc)
- && pcmk__str_eq(op->op_type, RSC_STATUS, pcmk__str_casei)) {
+ && pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
/* Don't send alerts for probes with the expected result. Leave it up to
* the agent whether to alert for 'failed' probes. (Even if we find a
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index c565728..400d3b0 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -544,7 +544,20 @@ lrmd_ipc_connection_destroy(gpointer userdata)
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
- crm_info("IPC connection destroyed");
+ switch (native->type) {
+ case pcmk__client_ipc:
+ crm_info("Disconnected from local executor");
+ break;
+#ifdef HAVE_GNUTLS_GNUTLS_H
+ case pcmk__client_tls:
+ crm_info("Disconnected from remote executor on %s",
+ native->remote_nodename);
+ break;
+#endif
+ default:
+ crm_err("Unsupported executor connection type %d (bug?)",
+ native->type);
+ }
/* Prevent these from being cleaned up in lrmd_api_disconnect() */
native->ipc = NULL;
@@ -588,7 +601,9 @@ lrmd_tls_connection_destroy(gpointer userdata)
}
free(native->remote->buffer);
+ free(native->remote->start_state);
native->remote->buffer = NULL;
+ native->remote->start_state = NULL;
native->source = 0;
native->sock = 0;
native->psk_cred_c = NULL;
@@ -980,6 +995,7 @@ lrmd_handshake(lrmd_t * lrmd, const char *name)
const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION);
const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION);
const char *tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID);
+ const char *start_state = crm_element_value(reply, PCMK__XA_NODE_START_STATE);
long long uptime = -1;
crm_element_value_int(reply, F_LRMD_RC, &rc);
@@ -992,6 +1008,10 @@ lrmd_handshake(lrmd_t * lrmd, const char *name)
crm_element_value_ll(reply, PCMK__XA_UPTIME, &uptime);
native->remote->uptime = uptime;
+ if (start_state) {
+ native->remote->start_state = strdup(start_state);
+ }
+
if (rc == -EPROTO) {
crm_err("Executor protocol version mismatch between client (%s) and server (%s)",
LRMD_PROTOCOL_VERSION, version);
@@ -1038,11 +1058,15 @@ lrmd_ipc_connect(lrmd_t * lrmd, int *fd)
if (fd) {
/* No mainloop */
native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0);
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *fd = crm_ipc_get_fd(native->ipc);
- } else if (native->ipc) {
- crm_perror(LOG_ERR, "Connection to executor failed");
- rc = -ENOTCONN;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, fd);
+ }
+ if (rc != pcmk_rc_ok) {
+ crm_err("Connection to executor failed: %s", pcmk_rc_str(rc));
+ rc = -ENOTCONN;
+ }
}
} else {
native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks);
@@ -1238,7 +1262,7 @@ lrmd__init_remote_key(gnutls_datum_t *key)
bool env_is_fallback = false;
if (need_env) {
- env_location = getenv("PCMK_authkey_location");
+ env_location = pcmk__env_option(PCMK__ENV_AUTHKEY_LOCATION);
need_env = false;
}
@@ -1657,15 +1681,15 @@ lrmd_api_disconnect(lrmd_t * lrmd)
lrmd_private_t *native = lrmd->lrmd_private;
int rc = pcmk_ok;
- crm_info("Disconnecting %s %s executor connection",
- pcmk__client_type_str(native->type),
- (native->remote_nodename? native->remote_nodename : "local"));
switch (native->type) {
case pcmk__client_ipc:
+ crm_debug("Disconnecting from local executor");
lrmd_ipc_disconnect(lrmd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
+ crm_debug("Disconnecting from remote executor on %s",
+ native->remote_nodename);
lrmd_tls_disconnect(lrmd);
break;
#endif
@@ -1964,8 +1988,8 @@ lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard,
g_hash_table_insert(params_table, strdup(param->key), strdup(param->value));
}
action = services__create_resource_action(type, standard, provider, type,
- CRMD_ACTION_METADATA, 0,
- CRMD_METADATA_CALL_TIMEOUT,
+ PCMK_ACTION_META_DATA, 0,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS,
params_table, 0);
lrmd_key_value_freeall(params);
@@ -2421,14 +2445,15 @@ lrmd__metadata_async(const lrmd_rsc_info_t *rsc,
if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) {
return stonith__metadata_async(rsc->type,
- CRMD_METADATA_CALL_TIMEOUT / 1000,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS / 1000,
callback, user_data);
}
action = services__create_resource_action(pcmk__s(rsc->id, rsc->type),
rsc->standard, rsc->provider,
- rsc->type, CRMD_ACTION_METADATA,
- 0, CRMD_METADATA_CALL_TIMEOUT,
+ rsc->type,
+ PCMK_ACTION_META_DATA, 0,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS,
NULL, 0);
if (action == NULL) {
pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
@@ -2531,3 +2556,15 @@ lrmd__uptime(lrmd_t *lrmd)
return native->remote->uptime;
}
}
+
+const char *
+lrmd__node_start_state(lrmd_t *lrmd)
+{
+ lrmd_private_t *native = lrmd->lrmd_private;
+
+ if (native->remote == NULL) {
+ return NULL;
+ } else {
+ return native->remote->start_state;
+ }
+}
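
The lrmd_client.c changes cache the node's start state from the handshake reply, free it when the connection is torn down, and expose it through the new lrmd__node_start_state() accessor. A plain-C sketch of that owned-string pattern, using illustrative names rather than the real lrmd structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct remote_conn {
        char *start_state;      /* owned copy of the handshake value, or NULL */
    };

    static void
    handshake_reply(struct remote_conn *remote, const char *start_state)
    {
        if (start_state != NULL) {
            remote->start_state = strdup(start_state);  /* keep our own copy */
        }
    }

    static void
    connection_destroy(struct remote_conn *remote)
    {
        free(remote->start_state);      /* free(NULL) is harmless */
        remote->start_state = NULL;     /* avoid a dangling pointer on reuse */
    }

    static const char *
    node_start_state(const struct remote_conn *remote)
    {
        return (remote == NULL)? NULL : remote->start_state;
    }

    int
    main(void)
    {
        struct remote_conn remote = { NULL };

        handshake_reply(&remote, "online");
        printf("start state: %s\n", node_start_state(&remote));
        connection_destroy(&remote);
        return 0;
    }
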
diff --git a/lib/pacemaker/Makefile.am b/lib/pacemaker/Makefile.am
index ebf3b6d..06f8dfb 100644
--- a/lib/pacemaker/Makefile.am
+++ b/lib/pacemaker/Makefile.am
@@ -16,24 +16,24 @@ noinst_HEADERS = libpacemaker_private.h
## libraries
lib_LTLIBRARIES = libpacemaker.la
-## SOURCES
-
-libpacemaker_la_LDFLAGS = -version-info 7:0:6
+libpacemaker_la_LDFLAGS = -version-info 8:0:7
libpacemaker_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpacemaker_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/cib/libcib.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/lrmd/liblrmd.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/fencing/libstonithd.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
# -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+
+## Library sources (*must* use += format for bumplibs)
libpacemaker_la_SOURCES =
libpacemaker_la_SOURCES += pcmk_acl.c
+libpacemaker_la_SOURCES += pcmk_agents.c
libpacemaker_la_SOURCES += pcmk_cluster_queries.c
libpacemaker_la_SOURCES += pcmk_fence.c
libpacemaker_la_SOURCES += pcmk_graph_consumer.c
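
The three -version-info bumps in this patch (libstonithd 34:3:8 to 34:4:8, liblrmd 29:6:1 to 30:0:2, libpacemaker 7:0:6 to 8:0:7) follow the usual libtool rules: a revision-only bump for implementation-only changes, and current/age bumps when interfaces are added. On Linux-style hosts libtool typically maps current:revision:age to a lib<name>.so.(current-age).(age).(revision) file name; the sketch below just prints that mapping for the new triples and is offered only for orientation:

    #include <stdio.h>

    struct version_info {
        const char *lib;
        int current, revision, age;
    };

    int
    main(void)
    {
        const struct version_info libs[] = {
            { "libstonithd",  34, 4, 8 },   /* was 34:3:8 (implementation only) */
            { "liblrmd",      30, 0, 2 },   /* was 29:6:1 (interfaces added)    */
            { "libpacemaker",  8, 0, 7 },   /* was 7:0:6  (interfaces added)    */
        };

        for (int i = 0; i < (int) (sizeof(libs) / sizeof(libs[0])); i++) {
            printf("%s -version-info %d:%d:%d -> .so.%d.%d.%d\n",
                   libs[i].lib, libs[i].current, libs[i].revision, libs[i].age,
                   libs[i].current - libs[i].age, libs[i].age, libs[i].revision);
        }
        return 0;
    }
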
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 192d5a7..c4a0c90 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -14,7 +14,20 @@
* declared with G_GNUC_INTERNAL for efficiency.
*/
-#include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
+#include <crm/lrmd_events.h> // lrmd_event_data_t
+#include <crm/common/scheduler.h> // pcmk_action_t, pcmk_node_t, etc.
+#include <crm/pengine/internal.h> // pe__location_t
+
+// Colocation flags
+enum pcmk__coloc_flags {
+ pcmk__coloc_none = 0U,
+
+ // Primary is affected even if already active
+ pcmk__coloc_influence = (1U << 0),
+
+ // Colocation was explicitly configured in CIB
+ pcmk__coloc_explicit = (1U << 1),
+};
// Flags to modify the behavior of add_colocated_node_scores()
enum pcmk__coloc_select {
@@ -52,18 +65,30 @@ enum pcmk__updated {
(flags_to_clear), #flags_to_clear); \
} while (0)
-// Resource allocation methods
+// Resource assignment methods
struct resource_alloc_functions_s {
/*!
* \internal
* \brief Assign a resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions (if \p rsc is not a
+ * primitive, this applies to its primitive
+ * descendants instead)
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource()
+ * can completely undo the assignment. A successful assignment can be
+ * either undone or left alone as final. A failed assignment has the
+ * same effect as calling pcmk__unassign_resource(); there are no side
+ * effects on roles or actions.
*/
- pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer);
+ pcmk_node_t *(*assign)(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
/*!
* \internal
@@ -71,7 +96,7 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to create actions for
*/
- void (*create_actions)(pe_resource_t *rsc);
+ void (*create_actions)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -82,7 +107,7 @@ struct resource_alloc_functions_s {
*
* \return true if any probe was created, otherwise false
*/
- bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
+ bool (*create_probe)(pcmk_resource_t *rsc, pcmk_node_t *node);
/*!
* \internal
@@ -90,14 +115,14 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to create implicit constraints for
*/
- void (*internal_constraints)(pe_resource_t *rsc);
+ void (*internal_constraints)(pcmk_resource_t *rsc);
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -105,17 +130,17 @@ struct resource_alloc_functions_s {
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
- void (*apply_coloc_score) (pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation,
- bool for_dependent);
+ void (*apply_coloc_score)(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ bool for_dependent);
/*!
* \internal
* \brief Create list of all resources in colocations with a given resource
*
* Given a resource, create a list of all resources involved in mandatory
- * colocations with it, whether directly or indirectly via chained colocations.
+ * colocations with it, whether directly or via chained colocations.
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
@@ -127,8 +152,8 @@ struct resource_alloc_functions_s {
* \p colocated_rscs and \p orig_rsc, and the desired resource as
* \p rsc. The recursive calls will use other values.
*/
- GList *(*colocated_resources)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+ GList *(*colocated_resources)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
/*!
@@ -148,8 +173,9 @@ struct resource_alloc_functions_s {
* \note The pcmk__with_this_colocations() wrapper should usually be used
* instead of using this method directly.
*/
- void (*with_this_colocations)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+ void (*with_this_colocations)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
/*!
* \internal
@@ -169,8 +195,9 @@ struct resource_alloc_functions_s {
* \note The pcmk__this_with_colocations() wrapper should usually be used
* instead of using this method directly.
*/
- void (*this_with_colocations)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+ void (*this_with_colocations)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
/*!
* \internal
@@ -180,17 +207,31 @@ struct resource_alloc_functions_s {
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to
+ * \c NULL to copy allowed nodes from
+ * \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores
+ * will not be added, and \p *nodes must be
+ * \c NULL as well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation,
+ * and the \c pcmk__coloc_select_this_with flag are used together (and
+ * only by \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
*/
- void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+ void (*add_colocated_node_scores)(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
/*!
@@ -200,7 +241,7 @@ struct resource_alloc_functions_s {
* \param[in,out] rsc Resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
- void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
+ void (*apply_location)(pcmk_resource_t *rsc, pe__location_t *location);
/*!
* \internal
@@ -214,8 +255,7 @@ struct resource_alloc_functions_s {
* of node. For collective resources, the flags can differ due to
* multiple instances possibly being involved.
*/
- enum pe_action_flags (*action_flags)(pe_action_t *action,
- const pe_node_t *node);
+ uint32_t (*action_flags)(pcmk_action_t *action, const pcmk_node_t *node);
/*!
* \internal
@@ -226,26 +266,33 @@ struct resource_alloc_functions_s {
* ordering. Effects may cascade to other orderings involving the actions as
* well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this
- * node (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates
- * (may include pe_action_optional to affect only
- * mandatory actions, and pe_action_runnable to
- * affect only runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this
+ * node (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates
+ * (may include pcmk_action_optional to affect
+ * only mandatory actions and pcmk_action_runnable
+ * to affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
- uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+ uint32_t (*update_ordered_actions)(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
- void (*output_actions)(pe_resource_t *rsc);
+ /*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a resource
+ *
+ * \param[in,out] rsc Resource to output actions for
+ */
+ void (*output_actions)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -253,7 +300,7 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource whose actions should be added
*/
- void (*add_actions_to_graph)(pe_resource_t *rsc);
+ void (*add_actions_to_graph)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -265,7 +312,7 @@ struct resource_alloc_functions_s {
* \param[in] rsc Resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
- void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml);
+ void (*add_graph_meta)(const pcmk_resource_t *rsc, xmlNode *xml);
/*!
* \internal
@@ -275,15 +322,15 @@ struct resource_alloc_functions_s {
* resources colocated with it, to determine whether a node has sufficient
* capacity. Given a resource and a table of utilization values, it will add
* the resource's utilization to the existing values, if the resource has
- * not yet been allocated to a node.
+ * not yet been assigned to a node.
*
* \param[in] rsc Resource with utilization to add
- * \param[in] orig_rsc Resource being allocated (for logging only)
+ * \param[in] orig_rsc Resource being assigned (for logging only)
* \param[in] all_rscs List of all resources that will be summed
* \param[in,out] utilization Table of utilization values to add to
*/
- void (*add_utilization)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+ void (*add_utilization)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
/*!
@@ -292,95 +339,98 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to check for shutdown lock
*/
- void (*shutdown_lock)(pe_resource_t *rsc);
+ void (*shutdown_lock)(pcmk_resource_t *rsc);
};
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
-void pcmk__update_action_for_orderings(pe_action_t *action,
- pe_working_set_t *data_set);
+void pcmk__update_action_for_orderings(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+uint32_t pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__log_action(const char *pre_text, const pe_action_t *action,
+void pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details);
G_GNUC_INTERNAL
-pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
- guint interval_ms, const pe_node_t *node);
+pcmk_action_t *pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *name,
+ guint interval_ms,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
+pcmk_action_t *pcmk__new_shutdown_action(pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
+bool pcmk__action_locks_rsc_to_node(const pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__deduplicate_action_inputs(pe_action_t *action);
+void pcmk__deduplicate_action_inputs(pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__output_actions(pe_working_set_t *data_set);
+void pcmk__output_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
+bool pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op);
G_GNUC_INTERNAL
-void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
+void pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler);
// Recurring actions (pcmk_sched_recurring.c)
G_GNUC_INTERNAL
-void pcmk__create_recurring_actions(pe_resource_t *rsc);
+void pcmk__create_recurring_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
+void pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
const char *task, guint interval_ms,
- const pe_node_t *node, const char *reason);
+ const pcmk_node_t *node, const char *reason);
G_GNUC_INTERNAL
-void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
- guint interval_ms, pe_node_t *node);
+void pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__action_is_recurring(const pe_action_t *action);
+bool pcmk__action_is_recurring(const pcmk_action_t *action);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
-bool pcmk__graph_has_loop(const pe_action_t *init_action,
- const pe_action_t *action,
- pe_action_wrapper_t *input);
+bool pcmk__graph_has_loop(const pcmk_action_t *init_action,
+ const pcmk_action_t *action,
+ pcmk__related_action_t *input);
G_GNUC_INTERNAL
-void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
+void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_graph(pe_working_set_t *data_set);
+void pcmk__create_graph(pcmk_scheduler_t *scheduler);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
-void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
+void pcmk__order_vs_fence(pcmk_action_t *stonith_op,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
- pe_action_t *action, enum pe_ordering order);
+void pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_action_t *action,
+ enum pcmk__action_relation_flags order);
G_GNUC_INTERNAL
-void pcmk__fence_guest(pe_node_t *node);
+void pcmk__fence_guest(pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__node_unfenced(const pe_node_t *node);
+bool pcmk__node_unfenced(const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
@@ -388,48 +438,48 @@ void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
// Injected scheduler inputs (pcmk_sched_injections.c)
-void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
+void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
const pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
-pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
+pcmk_resource_t *pcmk__find_constraint_resource(GList *rsc_list,
+ const char *id);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
- const pe_working_set_t *data_set);
+ const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set,
- const char *id, pe_resource_t **rsc,
- pe_tag_t **tag);
+bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler,
+ const char *id, pcmk_resource_t **rsc,
+ pcmk_tag_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
- bool convert_rsc, const pe_working_set_t *data_set);
+ bool convert_rsc, const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__create_internal_constraints(pe_working_set_t *data_set);
+void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler);
// Location constraints
G_GNUC_INTERNAL
-void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
- int node_weight, const char *discover_mode,
- pe_node_t *foo_node,
- pe_working_set_t *data_set);
+pe__location_t *pcmk__new_location(const char *id, pcmk_resource_t *rsc,
+ int node_score, const char *discover_mode,
+ pcmk_node_t *foo_node);
G_GNUC_INTERNAL
-void pcmk__apply_locations(pe_working_set_t *data_set);
+void pcmk__apply_locations(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
+void pcmk__apply_location(pcmk_resource_t *rsc, pe__location_t *constraint);
// Colocation constraints (pcmk_sched_colocation.c)
@@ -440,54 +490,104 @@ enum pcmk__coloc_affects {
pcmk__coloc_affects_role,
};
+/*!
+ * \internal
+ * \brief Get the value of a colocation's node attribute
+ *
+ * When looking up a colocation node attribute on a bundle node for a bundle
+ * primitive, we should always look on the bundle node's assigned host,
+ * regardless of the value of XML_RSC_ATTR_TARGET. At most one resource (the
+ * bundle primitive, if any) can run on a bundle node, so any colocation must
+ * necessarily be evaluated with respect to the bundle node (the container).
+ *
+ * \param[in] node Node on which to look up the attribute
+ * \param[in] attr Name of attribute to look up
+ * \param[in] rsc Resource on whose behalf to look up the attribute
+ *
+ * \return Value of \p attr on \p node or on the host of \p node, as appropriate
+ */
+static inline const char *
+pcmk__colocation_node_attr(const pcmk_node_t *node, const char *attr,
+ const pcmk_resource_t *rsc)
+{
+ const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
+ const bool force_host = pe__is_bundle_node(node)
+ && pe_rsc_is_bundled(rsc)
+ && (top == pe__bundled_resource(rsc));
+
+ return pe__node_attribute_calculated(node, attr, rsc,
+ pcmk__rsc_node_assigned, force_host);
+}
+
G_GNUC_INTERNAL
-enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation,
+enum pcmk__coloc_affects pcmk__colocation_affects(const pcmk_resource_t
+ *dependent,
+ const pcmk_resource_t
+ *primary,
+ const pcmk__colocation_t
+ *colocation,
bool preview);
G_GNUC_INTERNAL
-void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation);
+void pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
-void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
-void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+void pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
G_GNUC_INTERNAL
-void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ const GList *primary_nodes,
+ bool merge_scores);
G_GNUC_INTERNAL
-void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation);
+void pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__add_this_with_list(GList **list, GList *addition);
+void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation);
+void pcmk__add_this_with_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__add_with_this_list(GList **list, GList *addition);
+void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__add_with_this_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+GList *pcmk__with_this_colocations(const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+GList *pcmk__this_with_colocations(const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
- pe_resource_t *dependent, pe_resource_t *primary,
+ pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role, const char *primary_role,
- bool influence, pe_working_set_t *data_set);
+ uint32_t flags);
G_GNUC_INTERNAL
-void pcmk__block_colocation_dependents(pe_action_t *action,
- pe_working_set_t *data_set);
+void pcmk__block_colocation_dependents(pcmk_action_t *action);
/*!
* \internal
@@ -503,7 +603,7 @@ void pcmk__block_colocation_dependents(pe_action_t *action,
*/
static inline bool
pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
rsc = colocation->primary;
@@ -521,8 +621,9 @@ pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
* This also avoids problematic scenarios where two containers want to
* perpetually swap places.
*/
- if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
- && !pcmk_is_set(rsc->flags, pe_rsc_failed)
+ if (pcmk_is_set(colocation->dependent->flags,
+ pcmk_rsc_remote_nesting_allowed)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_failed)
&& pcmk__list_of_1(rsc->running_on)) {
return false;
}
@@ -530,33 +631,34 @@ pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
/* The dependent in a colocation influences the primary's location
* if the influence option is true or the primary is not yet active.
*/
- return colocation->influence || (rsc->running_on == NULL);
+ return pcmk_is_set(colocation->flags, pcmk__coloc_influence)
+ || (rsc->running_on == NULL);
}
// Ordering constraints (pcmk_sched_ordering.c)
G_GNUC_INTERNAL
-void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
- pe_action_t *first_action, pe_resource_t *then_rsc,
- char *then_task, pe_action_t *then_action,
- uint32_t flags, pe_working_set_t *data_set);
+void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task,
+ pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ char *then_task, pcmk_action_t *then_action,
+ uint32_t flags, pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
-void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
+void pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__order_stops_before_shutdown(pe_node_t *node,
- pe_action_t *shutdown_op);
+void pcmk__order_stops_before_shutdown(pcmk_node_t *node,
+ pcmk_action_t *shutdown_op);
G_GNUC_INTERNAL
-void pcmk__apply_orderings(pe_working_set_t *data_set);
+void pcmk__apply_orderings(pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
-void pcmk__order_after_each(pe_action_t *after, GList *list);
+void pcmk__order_after_each(pcmk_action_t *after, GList *list);
/*!
@@ -567,7 +669,7 @@ void pcmk__order_after_each(pe_action_t *after, GList *list);
* \param[in,out] first_task Action key for 'first' action
* \param[in] then_rsc Resource for 'then' action
* \param[in,out] then_task Action key for 'then' action
- * \param[in] flags Bitmask of enum pe_ordering flags
+ * \param[in] flags Group of enum pcmk__action_relation_flags
*/
#define pcmk__order_resource_actions(first_rsc, first_task, \
then_rsc, then_task, flags) \
@@ -579,260 +681,329 @@ void pcmk__order_after_each(pe_action_t *after, GList *list);
NULL, (flags), (first_rsc)->cluster)
#define pcmk__order_starts(rsc1, rsc2, flags) \
- pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \
- (rsc2), CRMD_ACTION_START, (flags))
+ pcmk__order_resource_actions((rsc1), PCMK_ACTION_START, \
+ (rsc2), PCMK_ACTION_START, (flags))
#define pcmk__order_stops(rsc1, rsc2, flags) \
- pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \
- (rsc2), CRMD_ACTION_STOP, (flags))
+ pcmk__order_resource_actions((rsc1), PCMK_ACTION_STOP, \
+ (rsc2), PCMK_ACTION_STOP, (flags))
// Ticket constraints (pcmk_sched_tickets.c)
G_GNUC_INTERNAL
-void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
// Promotable clone resources (pcmk_sched_promotable.c)
G_GNUC_INTERNAL
-void pcmk__add_promotion_scores(pe_resource_t *rsc);
+void pcmk__add_promotion_scores(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__require_promotion_tickets(pe_resource_t *rsc);
+void pcmk__require_promotion_tickets(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__set_instance_roles(pe_resource_t *rsc);
+void pcmk__set_instance_roles(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_promotable_actions(pe_resource_t *clone);
+void pcmk__create_promotable_actions(pcmk_resource_t *clone);
G_GNUC_INTERNAL
-void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
+void pcmk__promotable_restart_ordering(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__order_promotable_instances(pe_resource_t *clone);
+void pcmk__order_promotable_instances(pcmk_resource_t *clone);
G_GNUC_INTERNAL
-void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
- pe_resource_t *dependent,
- const pcmk__colocation_t *colocation);
+void pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
+ const pcmk__colocation_t
+ *colocation);
G_GNUC_INTERNAL
-void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
- pe_resource_t *dependent,
- const pcmk__colocation_t *colocation);
+void pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
+ const pcmk__colocation_t
+ *colocation);
// Pacemaker Remote nodes (pcmk_sched_remote.c)
G_GNUC_INTERNAL
-bool pcmk__is_failed_remote_node(const pe_node_t *node);
+bool pcmk__is_failed_remote_node(const pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
+void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc,
- const pe_node_t *node);
+bool pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action);
+pcmk_node_t *pcmk__connection_host_for_action(const pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
+void pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params);
G_GNUC_INTERNAL
-void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action);
+void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml,
+ const pcmk_action_t *action);
// Primitives (pcmk_sched_primitive.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__primitive_assign(pcmk_resource_t *rsc,
+ const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__primitive_create_actions(pe_resource_t *rsc);
+void pcmk__primitive_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
+void pcmk__primitive_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action,
- const pe_node_t *node);
+uint32_t pcmk__primitive_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_primitive_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
-void pcmk__primitive_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
-void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
+void pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
bool optional);
G_GNUC_INTERNAL
-void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
+void pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
-void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
-void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
+void pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__group_create_actions(pe_resource_t *rsc);
+void pcmk__group_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__group_internal_constraints(pe_resource_t *rsc);
+void pcmk__group_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_group_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_group_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__group_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__group_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
+void pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
const char *log_id,
- GHashTable **nodes, const char *attr,
+ GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
-void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location);
+void pcmk__group_apply_location(pcmk_resource_t *rsc, pe__location_t *location);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__group_action_flags(pe_action_t *action,
- const pe_node_t *node);
+uint32_t pcmk__group_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-uint32_t pcmk__group_update_ordered_actions(pe_action_t *first,
- pe_action_t *then,
- const pe_node_t *node,
+uint32_t pcmk__group_update_ordered_actions(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-GList *pcmk__group_colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+GList *pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
-void pcmk__group_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
- GHashTable *utilization);
+void pcmk__group_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
-void pcmk__group_shutdown_lock(pe_resource_t *rsc);
+void pcmk__group_shutdown_lock(pcmk_resource_t *rsc);
// Clones (pcmk_sched_clone.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__clone_create_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+bool pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__clone_internal_constraints(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_clone_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
+
+G_GNUC_INTERNAL
+void pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__clone_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__clone_apply_location(pcmk_resource_t *rsc,
+ pe__location_t *constraint);
+
+G_GNUC_INTERNAL
+uint32_t pcmk__clone_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
+
+G_GNUC_INTERNAL
+void pcmk__clone_shutdown_lock(pcmk_resource_t *rsc);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
-const pe_resource_t *pcmk__get_rsc_in_container(const pe_resource_t *instance);
+pcmk_node_t *pcmk__bundle_assign(pcmk_resource_t *rsc,
+ const pcmk_node_t *prefer, bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__bundle_create_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+bool pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_internal_constraints(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_bundle_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__bundle_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__output_bundle_actions(pe_resource_t *rsc);
+void pcmk__bundle_apply_location(pcmk_resource_t *rsc,
+ pe__location_t *constraint);
+
+G_GNUC_INTERNAL
+uint32_t pcmk__bundle_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__output_bundle_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc);
// Clone instances or bundle replica containers (pcmk_sched_instances.c)
G_GNUC_INTERNAL
-void pcmk__assign_instances(pe_resource_t *collective, GList *instances,
+void pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node);
G_GNUC_INTERNAL
-void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances);
+void pcmk__create_instance_actions(pcmk_resource_t *rsc, GList *instances);
G_GNUC_INTERNAL
-bool pcmk__instance_matches(const pe_resource_t *instance,
- const pe_node_t *node, enum rsc_role_e role,
+bool pcmk__instance_matches(const pcmk_resource_t *instance,
+ const pcmk_node_t *node, enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
-pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc,
- enum rsc_role_e role,
- bool current);
+pcmk_resource_t *pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc,
+ enum rsc_role_e role,
+ bool current);
G_GNUC_INTERNAL
-uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first,
- pe_action_t *then,
- const pe_node_t *node,
+uint32_t pcmk__instance_update_ordered_actions(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__collective_action_flags(pe_action_t *action,
- const GList *instances,
- const pe_node_t *node);
-
-G_GNUC_INTERNAL
-void pcmk__add_collective_constraints(GList **list,
- const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this);
+uint32_t pcmk__collective_action_flags(pcmk_action_t *action,
+ const GList *instances,
+ const pcmk_node_t *node);
// Injections (pcmk_injections.c)
@@ -865,7 +1036,7 @@ xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
// Nodes (pcmk_sched_nodes.c)
G_GNUC_INTERNAL
-bool pcmk__node_available(const pe_node_t *node, bool consider_score,
+bool pcmk__node_available(const pcmk_node_t *node, bool consider_score,
bool consider_guest);
G_GNUC_INTERNAL
@@ -875,55 +1046,59 @@ G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
-GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
+void pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy);
+
+G_GNUC_INTERNAL
+void pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup);
G_GNUC_INTERNAL
-void pcmk__apply_node_health(pe_working_set_t *data_set);
+GList *pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node);
G_GNUC_INTERNAL
-pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
- const pe_node_t *node);
+void pcmk__apply_node_health(pcmk_scheduler_t *scheduler);
+
+G_GNUC_INTERNAL
+pcmk_node_t *pcmk__top_allowed_node(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
-void pcmk__set_allocation_methods(pe_working_set_t *data_set);
+void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
+bool pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
-GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set);
+GList *pcmk__rscs_matching_id(const char *id,
+ const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-GList *pcmk__colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+GList *pcmk__colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
-void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
-
-G_GNUC_INTERNAL
-void pcmk__output_resource_actions(pe_resource_t *rsc);
+void pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
-bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen,
- bool force);
+void pcmk__output_resource_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
+bool pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__unassign_resource(pe_resource_t *rsc);
+void pcmk__unassign_resource(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
- pe_resource_t **failed);
+bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_resource_t **failed);
G_GNUC_INTERNAL
-void pcmk__sort_resources(pe_working_set_t *data_set);
+void pcmk__sort_resources(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
@@ -935,26 +1110,27 @@ gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
-bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
+bool pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__order_probes(pe_working_set_t *data_set);
+void pcmk__order_probes(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
+bool pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__schedule_probes(pe_working_set_t *data_set);
+void pcmk__schedule_probes(pcmk_scheduler_t *scheduler);
// Functions related to live migration (pcmk_sched_migration.c)
-void pcmk__create_migration_actions(pe_resource_t *rsc,
- const pe_node_t *current);
+void pcmk__create_migration_actions(pcmk_resource_t *rsc,
+ const pcmk_node_t *current);
void pcmk__abort_dangling_migration(void *data, void *user_data);
-bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
+bool pcmk__rsc_can_migrate(const pcmk_resource_t *rsc,
+ const pcmk_node_t *current);
void pcmk__order_migration_equivalents(pe__ordering_t *order);
@@ -962,25 +1138,25 @@ void pcmk__order_migration_equivalents(pe__ordering_t *order);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
-int pcmk__compare_node_capacities(const pe_node_t *node1,
- const pe_node_t *node2);
+int pcmk__compare_node_capacities(const pcmk_node_t *node1,
+ const pcmk_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc);
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc);
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
+const pcmk_node_t *pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_utilization_constraints(pe_resource_t *rsc,
+void pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
const GList *allowed_nodes);
G_GNUC_INTERNAL
-void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
+void pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler);
#endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_acl.c b/lib/pacemaker/pcmk_acl.c
index c2072dc..85c461e 100644
--- a/lib/pacemaker/pcmk_acl.c
+++ b/lib/pacemaker/pcmk_acl.c
@@ -53,7 +53,10 @@ static const xmlChar *NS_DENIED = (const xmlChar *) ACL_NS_PREFIX "denied";
* \param[in,out] ns_recycle_denied
*/
static void
-pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret, xmlNs **ns_recycle_writable, xmlNs **ns_recycle_readable, xmlNs **ns_recycle_denied)
+pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
+ xmlNs **ns_recycle_writable,
+ xmlNs **ns_recycle_readable,
+ xmlNs **ns_recycle_denied)
{
if (ns == NS_WRITABLE)
{
@@ -88,10 +91,10 @@ pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
}
/*!
- * \brief This function takes some XML, and annotates it with XML
- * namespaces to indicate the ACL permissions.
+ * \brief Annotate a given XML element or property and its siblings with
+ * XML namespaces to indicate ACL permissions
*
- * \param[in,out] xml_modify
+ * \param[in,out] xml_modify XML to annotate
*
* \return A standard Pacemaker return code
* Namely:
@@ -104,7 +107,7 @@ pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
* \note This function is recursive
*/
static int
-pcmk__acl_annotate_permissions_recursive(xmlNode *xml_modify)
+annotate_with_siblings(xmlNode *xml_modify)
{
static xmlNs *ns_recycle_writable = NULL,
@@ -123,61 +126,74 @@ pcmk__acl_annotate_permissions_recursive(xmlNode *xml_modify)
for (i_node = xml_modify; i_node != NULL; i_node = i_node->next) {
switch (i_node->type) {
- case XML_ELEMENT_NODE:
- pcmk__set_xml_doc_flag(i_node, pcmk__xf_tracking);
-
- if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_read)) {
- ns = NS_DENIED;
- } else if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_write)) {
- ns = NS_READABLE;
- } else {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- /* XXX recursion can be turned into plain iteration to save stack */
- if (i_node->properties != NULL) {
- /* this is not entirely clear, but relies on the very same
- class-hierarchy emulation that libxml2 has firmly baked in
- its API/ABI */
- ret |= pcmk__acl_annotate_permissions_recursive((xmlNodePtr) i_node->properties);
- }
- if (i_node->children != NULL) {
- ret |= pcmk__acl_annotate_permissions_recursive(i_node->children);
- }
- break;
- case XML_ATTRIBUTE_NODE:
- /* we can utilize that parent has already been assigned the ns */
- if (!pcmk__check_acl(i_node->parent,
- (const char *) i_node->name,
- pcmk__xf_acl_read)) {
- ns = NS_DENIED;
- } else if (!pcmk__check_acl(i_node,
- (const char *) i_node->name,
- pcmk__xf_acl_write)) {
- ns = NS_READABLE;
- } else {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- break;
- case XML_COMMENT_NODE:
- /* we can utilize that parent has already been assigned the ns */
- if (!pcmk__check_acl(i_node->parent, (const char *) i_node->name, pcmk__xf_acl_read))
- {
- ns = NS_DENIED;
- }
- else if (!pcmk__check_acl(i_node->parent, (const char *) i_node->name, pcmk__xf_acl_write))
- {
- ns = NS_READABLE;
- }
- else
- {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- break;
- default:
- break;
+ case XML_ELEMENT_NODE:
+ pcmk__set_xml_doc_flag(i_node, pcmk__xf_tracking);
+
+ if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ // @TODO Could replace recursion with iteration to save stack
+ if (i_node->properties != NULL) {
+ /* This is not entirely clear, but relies on the very same
+ * class-hierarchy emulation that libxml2 has firmly baked
+ * in its API/ABI
+ */
+ ret |= annotate_with_siblings((xmlNodePtr)
+ i_node->properties);
+ }
+ if (i_node->children != NULL) {
+ ret |= annotate_with_siblings(i_node->children);
+ }
+ break;
+
+ case XML_ATTRIBUTE_NODE:
+ // We can utilize that parent has already been assigned the ns
+ if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node,
+ (const char *) i_node->name,
+ pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ break;
+
+ case XML_COMMENT_NODE:
+ // We can utilize that parent has already been assigned the ns
+ if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ break;
+
+ default:
+ break;
}
}
@@ -222,10 +238,12 @@ pcmk__acl_annotate_permissions(const char *cred, const xmlDoc *cib_doc,
pcmk__enable_acl(target, target, cred);
- ret = pcmk__acl_annotate_permissions_recursive(target);
+ ret = annotate_with_siblings(target);
if (ret == pcmk_rc_ok) {
- char* credentials = crm_strdup_printf("ACLs as evaluated for user %s", cred);
+ char *credentials = crm_strdup_printf("ACLs as evaluated for user %s",
+ cred);
+
comment = xmlNewDocComment(target->doc, (pcmkXmlStr) credentials);
free(credentials);
if (comment == NULL) {
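Editor's note on the pcmk_acl.c hunks above: the reindented switch keeps the same traversal strategy as before, namely walk a node and all of its siblings via ->next, recurse into the attribute list (cast to xmlNodePtr, relying on the struct-layout compatibility libxml2 bakes into its API), and recurse into child nodes. The following is a minimal, self-contained sketch of that pattern, assuming only libxml2; it merely prints node types instead of applying ACL namespaces, and the XML string is made up for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <libxml/parser.h>
    #include <libxml/tree.h>

    /* Walk a node and all of its siblings, recursing into attributes and
     * children -- the same shape as annotate_with_siblings() above
     */
    static void
    walk_with_siblings(xmlNode *node)
    {
        for (xmlNode *i_node = node; i_node != NULL; i_node = i_node->next) {
            switch (i_node->type) {
                case XML_ELEMENT_NODE:
                    printf("element <%s>\n", (const char *) i_node->name);
                    if (i_node->properties != NULL) {
                        // Attribute list shares enough layout to be walked as nodes
                        walk_with_siblings((xmlNode *) i_node->properties);
                    }
                    if (i_node->children != NULL) {
                        walk_with_siblings(i_node->children);
                    }
                    break;
                case XML_ATTRIBUTE_NODE:
                    printf("  attribute %s\n", (const char *) i_node->name);
                    break;
                case XML_COMMENT_NODE:
                    printf("  comment\n");
                    break;
                default:
                    break;
            }
        }
    }

    int
    main(void)
    {
        // Throwaway document, for illustration only
        const char *xml = "<cib><!-- note --><status id='x'/></cib>";
        xmlDoc *doc = xmlReadMemory(xml, (int) strlen(xml), "sketch.xml",
                                    NULL, 0);

        if (doc != NULL) {
            walk_with_siblings(xmlDocGetRootElement(doc));
            xmlFreeDoc(doc);
        }
        xmlCleanupParser();
        return 0;
    }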
diff --git a/lib/pacemaker/pcmk_agents.c b/lib/pacemaker/pcmk_agents.c
new file mode 100644
index 0000000..6fec140
--- /dev/null
+++ b/lib/pacemaker/pcmk_agents.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/lrmd_internal.h>
+#include <pacemaker.h>
+#include <pacemaker-internal.h>
+
+int
+pcmk__list_alternatives(pcmk__output_t *out, const char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL && agent_spec != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "alternatives-list", list, agent_spec);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ out->err(out, _("No %s found for %s"), "OCF providers", agent_spec);
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_alternatives(xmlNodePtr *xml, const char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_alternatives(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief List all agents available for the named standard and/or provider
+ *
+ * \param[in,out] out Output object
+ * \param[in] agent_spec STD[:PROV]
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__list_agents(pcmk__output_t *out, char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ char *provider = NULL;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL && agent_spec != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ provider = strchr(agent_spec, ':');
+
+ if (provider) {
+ *provider++ = 0;
+ }
+
+ rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
+
+ if (rc > 0) {
+ rc = out->message(out, "agents-list", list, agent_spec, provider);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ if (provider == NULL) {
+ out->err(out, _("No agents found for standard '%s'"), agent_spec);
+ } else {
+ out->err(out, _("No agents found for standard '%s' and provider '%s'"),
+ agent_spec, provider);
+ }
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_agents(xmlNodePtr *xml, char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_agents(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+int
+pcmk__list_providers(pcmk__output_t *out, const char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "providers-list", list, agent_spec);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ if (agent_spec == NULL) {
+ out->err(out, _("No %s found"), "OCF providers");
+ } else {
+ out->err(out, _("No %s found for %s"), "OCF providers", agent_spec);
+ }
+
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_providers(xmlNodePtr *xml, const char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_providers(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+int
+pcmk__list_standards(pcmk__output_t *out)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "standards-list", list);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ out->err(out, _("No %s found"), "standards");
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_standards(xmlNodePtr *xml)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_standards(out);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
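Editor's note on the new pcmk_agents.c above: each public entry point (pcmk_list_standards(), pcmk_list_agents(), pcmk_list_providers(), pcmk_list_alternatives()) builds an XML output object, runs the matching pcmk__ helper over an executor connection, and hands the resulting XML tree back to the caller. A hedged sketch of a caller is below, using only the signatures visible in this diff plus pcmk_rc_str() and libxml2; it assumes it runs where the executor IPC is reachable, and the "ocf:heartbeat" spec is just an example (note pcmk_list_agents() takes a non-const spec, hence the writable buffer).

    #include <stdio.h>
    #include <libxml/tree.h>
    #include <pacemaker.h>            // pcmk_list_standards(), pcmk_list_agents()
    #include <crm/common/results.h>   // pcmk_rc_ok, pcmk_rc_str()

    int
    main(void)
    {
        xmlNodePtr xml = NULL;
        char spec[] = "ocf:heartbeat";   // example spec; must be writable
        int rc = pcmk_list_standards(&xml);

        if (rc != pcmk_rc_ok) {
            fprintf(stderr, "listing standards failed: %s\n", pcmk_rc_str(rc));
        } else {
            xmlElemDump(stdout, NULL, xml);   // caller owns the returned XML
        }
        if (xml != NULL) {
            xmlFreeNode(xml);
            xml = NULL;
        }

        rc = pcmk_list_agents(&xml, spec);
        if (rc == pcmk_rc_ok) {
            xmlElemDump(stdout, NULL, xml);
        }
        if (xml != NULL) {
            xmlFreeNode(xml);
        }
        return 0;
    }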
diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c
index 6002cd4..6a12c45 100644
--- a/lib/pacemaker/pcmk_cluster_queries.c
+++ b/lib/pacemaker/pcmk_cluster_queries.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -9,7 +9,6 @@
#include <crm_internal.h>
-#include <glib.h> // gboolean, GMainLoop, etc.
#include <libxml/tree.h> // xmlNode
#include <pacemaker.h>
@@ -362,8 +361,7 @@ ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb,
pcmk_register_ipc_callback(api, cb, data);
}
- rc = pcmk_connect_ipc(api, dispatch_type);
-
+ rc = pcmk__connect_ipc(api, dispatch_type, 5);
if (rc != pcmk_rc_ok) {
if (rc == EREMOTEIO) {
data->pcmkd_state = pcmk_pacemakerd_state_remote;
@@ -371,6 +369,9 @@ ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb,
/* EREMOTEIO may be expected and acceptable for some callers
* on a Pacemaker Remote node
*/
+ crm_debug("Ignoring %s connection failure: No "
+ "Pacemaker Remote connection",
+ pcmk_ipc_name(api, true));
rc = pcmk_rc_ok;
} else {
out->err(out, "error: Could not connect to %s: %s",
@@ -402,7 +403,7 @@ poll_until_reply(data_t *data, pcmk_ipc_api_t *api, const char *on_node)
pcmk__output_t *out = data->out;
uint64_t start_nsec = qb_util_nano_current_get();
- uint64_t end_nsec = start_nsec;
+ uint64_t end_nsec = 0;
uint64_t elapsed_ms = 0;
uint64_t remaining_ms = data->message_timeout_ms;
@@ -806,7 +807,7 @@ struct node_data {
int found;
const char *field; /* XML attribute to check for node name */
const char *type;
- gboolean bash_export;
+ bool bash_export;
};
static void
@@ -819,16 +820,13 @@ remote_node_print_helper(xmlNode *result, void *user_data)
// node name and node id are the same for remote/guest nodes
out->message(out, "crmadmin-node", data->type,
- name ? name : id,
- id,
- data->bash_export);
+ pcmk__s(name, id), id, data->bash_export);
data->found++;
}
// \return Standard Pacemaker return code
int
-pcmk__list_nodes(pcmk__output_t *out, const char *node_types,
- gboolean bash_export)
+pcmk__list_nodes(pcmk__output_t *out, const char *node_types, bool bash_export)
{
xmlNode *xml_node = NULL;
int rc;
@@ -862,7 +860,8 @@ pcmk__list_nodes(pcmk__output_t *out, const char *node_types,
remote_node_print_helper, &data);
}
- if (pcmk__str_empty(node_types) || !pcmk__strcmp(node_types, ",|^remote", pcmk__str_regex)) {
+ if (pcmk__str_empty(node_types)
+ || pcmk__str_eq(node_types, ",|^remote", pcmk__str_regex)) {
data.field = "id";
data.type = "remote";
crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_CONFIG,
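Editor's note on the pcmk_cluster_queries.c hunks above: ipc_connect() now calls the internal pcmk__connect_ipc(), which additionally takes a number of connection attempts (five here), and the EREMOTEIO case on Pacemaker Remote nodes is logged explicitly before being treated as success. External code still goes through the public wrapper; the sketch below is a hedged illustration of that public path only, and the header location and enum/function names (pcmk_new_ipc_api(), pcmk_connect_ipc(), pcmk_ipc_pacemakerd, pcmk_ipc_dispatch_sync) are assumptions drawn from the public IPC API rather than from this diff.

    #include <stdbool.h>
    #include <stdio.h>
    #include <crm/common/ipc.h>       // assumed location of the public IPC API
    #include <crm/common/results.h>

    int
    main(void)
    {
        pcmk_ipc_api_t *api = NULL;
        int rc = pcmk_new_ipc_api(&api, pcmk_ipc_pacemakerd);

        if (rc == pcmk_rc_ok) {
            // Synchronous dispatch: replies are processed within API calls
            rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
        }
        if (rc != pcmk_rc_ok) {
            fprintf(stderr, "could not connect: %s\n", pcmk_rc_str(rc));
        } else {
            printf("connected to %s\n", pcmk_ipc_name(api, true));
            pcmk_disconnect_ipc(api);
        }
        pcmk_free_ipc_api(api);
        return (rc == pcmk_rc_ok)? 0 : 1;
    }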
diff --git a/lib/pacemaker/pcmk_fence.c b/lib/pacemaker/pcmk_fence.c
index 7a0490f..9f86e46 100644
--- a/lib/pacemaker/pcmk_fence.c
+++ b/lib/pacemaker/pcmk_fence.c
@@ -95,11 +95,12 @@ reduce_fence_history(stonith_history_t *history)
for (np = new; ; np = np->next) {
if ((hp->state == st_done) || (hp->state == st_failed)) {
/* action not in progress */
- if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei) &&
- pcmk__str_eq(hp->action, np->action, pcmk__str_none) &&
- (hp->state == np->state) &&
- ((hp->state == st_done) ||
- pcmk__str_eq(hp->delegate, np->delegate, pcmk__str_casei))) {
+ if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei)
+ && pcmk__str_eq(hp->action, np->action, pcmk__str_none)
+ && (hp->state == np->state)
+ && ((hp->state == st_done)
+ || pcmk__str_eq(hp->delegate, np->delegate,
+ pcmk__str_casei))) {
/* purge older hp */
stonith_history_free(hp);
break;
@@ -146,6 +147,7 @@ async_fence_helper(gpointer user_data)
stonith_t *st = async_fence_data.st;
int call_id = 0;
int rc = stonith_api_connect_retry(st, async_fence_data.name, 10);
+ int timeout = 0;
if (rc != pcmk_ok) {
g_main_loop_quit(mainloop);
@@ -154,7 +156,8 @@ async_fence_helper(gpointer user_data)
return TRUE;
}
- st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, notify_callback);
+ st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE,
+ notify_callback);
call_id = st->cmds->fence_with_delay(st,
st_opt_allow_suicide,
@@ -171,12 +174,12 @@ async_fence_helper(gpointer user_data)
return TRUE;
}
- st->cmds->register_callback(st,
- call_id,
- (async_fence_data.timeout/1000
- + (async_fence_data.delay > 0 ? async_fence_data.delay : 0)),
- st_opt_timeout_updates, NULL, "callback", fence_callback);
-
+ timeout = async_fence_data.timeout / 1000;
+ if (async_fence_data.delay > 0) {
+ timeout += async_fence_data.delay;
+ }
+ st->cmds->register_callback(st, call_id, timeout, st_opt_timeout_updates,
+ NULL, "callback", fence_callback);
return TRUE;
}
@@ -251,9 +254,10 @@ pcmk__fence_history(pcmk__output_t *out, stonith_t *st, const char *target,
if (broadcast) {
stonith__set_call_options(opts, target, st_opt_broadcast);
}
- rc = st->cmds->history(st, opts,
- pcmk__str_eq(target, "*", pcmk__str_none)? NULL : target,
- &history, timeout/1000);
+ if (pcmk__str_eq(target, "*", pcmk__str_none)) {
+ target = NULL;
+ }
+ rc = st->cmds->history(st, opts, target, &history, (timeout / 1000));
if (cleanup) {
// Cleanup doesn't return a history list
@@ -314,7 +318,8 @@ pcmk_fence_history(xmlNodePtr *xml, stonith_t *st, const char *target,
out->quiet = quiet;
- rc = pcmk__fence_history(out, st, target, timeout, verbose, broadcast, cleanup);
+ rc = pcmk__fence_history(out, st, target, timeout, verbose, broadcast,
+ cleanup);
pcmk__xml_output_finish(out, xml);
return rc;
}
@@ -326,15 +331,17 @@ pcmk__fence_installed(pcmk__output_t *out, stonith_t *st, unsigned int timeout)
stonith_key_value_t *devices = NULL;
int rc = pcmk_rc_ok;
- rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout/1000);
- /* list_agents returns a negative error code or a positive number of agents. */
+ rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices,
+ (timeout / 1000));
+ // rc is a negative error code or a positive number of agents
if (rc < 0) {
return pcmk_legacy2rc(rc);
}
- out->begin_list(out, "fence device", "fence devices", "Installed fence devices");
- for (stonith_key_value_t *dIter = devices; dIter; dIter = dIter->next) {
- out->list_item(out, "device", "%s", dIter->value);
+ out->begin_list(out, "fence device", "fence devices",
+ "Installed fence devices");
+ for (stonith_key_value_t *iter = devices; iter != NULL; iter = iter->next) {
+ out->list_item(out, "device", "%s", iter->value);
}
out->end_list(out);
@@ -498,9 +505,10 @@ pcmk__fence_registered(pcmk__output_t *out, stonith_t *st, const char *target,
return pcmk_legacy2rc(rc);
}
- out->begin_list(out, "fence device", "fence devices", "Registered fence devices");
- for (stonith_key_value_t *dIter = devices; dIter; dIter = dIter->next) {
- out->list_item(out, "device", "%s", dIter->value);
+ out->begin_list(out, "fence device", "fence devices",
+ "Registered fence devices");
+ for (stonith_key_value_t *iter = devices; iter != NULL; iter = iter->next) {
+ out->list_item(out, "device", "%s", iter->value);
}
out->end_list(out);
@@ -609,7 +617,8 @@ pcmk__get_fencing_history(stonith_t *st, stonith_history_t **stonith_history,
if ((st == NULL) || (st->state == stonith_disconnected)) {
rc = ENOTCONN;
} else if (fence_history != pcmk__fence_history_none) {
- rc = st->cmds->history(st, st_opt_sync_call, NULL, stonith_history, 120);
+ rc = st->cmds->history(st, st_opt_sync_call, NULL, stonith_history,
+ 120);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
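Editor's note on the async_fence_helper() hunk above: the dense inline expression is replaced by an explicit computation of the callback timeout, converting the fence timeout from milliseconds to seconds and adding the requested delay (already in seconds) when positive. With hypothetical values the arithmetic looks like this; the numbers are illustrative only.

    #include <stdio.h>

    int
    main(void)
    {
        // Hypothetical values standing in for async_fence_data above
        unsigned int timeout_ms = 120000;   // fence timeout in milliseconds
        int delay_s = 5;                    // requested fencing delay in seconds
        int timeout = timeout_ms / 1000;    // 120 s

        if (delay_s > 0) {
            timeout += delay_s;             // callback waits 125 s in total
        }
        printf("callback timeout: %d s\n", timeout);
        return 0;
    }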
diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c
index f2f172e..0daa00d 100644
--- a/lib/pacemaker/pcmk_graph_consumer.c
+++ b/lib/pacemaker/pcmk_graph_consumer.c
@@ -47,7 +47,10 @@ update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
return; // All inputs have already been confirmed
}
- pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); // Presume ready until proven otherwise
+
+ // Presume ready until proven otherwise
+ pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
+
for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
@@ -56,7 +59,7 @@ update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
action_id, synapse->id);
pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed);
- } else if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) {
+ } else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) {
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
crm_trace("Synapse %d still not ready after action %d",
synapse->id, action_id);
@@ -87,14 +90,16 @@ update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id)
action_id, synapse->id);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
- } else if (all_confirmed && !(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) {
+ } else if (all_confirmed &&
+ !pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) {
all_confirmed = false;
crm_trace("Synapse %d still not confirmed after action %d",
synapse->id, action_id);
}
}
- if (all_confirmed && !(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) {
+ if (all_confirmed
+ && !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
crm_trace("Confirmed synapse %d", synapse->id);
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
}
@@ -113,13 +118,15 @@ pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action)
for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
- if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_failed)) {
+ if (pcmk_any_flags_set(synapse->flags,
+ pcmk__synapse_confirmed|pcmk__synapse_failed)) {
continue; // This synapse already completed
} else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
update_synapse_confirmed(synapse, action->id);
- } else if (!(pcmk_is_set(action->flags, pcmk__graph_action_failed)) || (synapse->priority == INFINITY)) {
+ } else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed)
+ || (synapse->priority == INFINITY)) {
update_synapse_ready(synapse, action->id);
}
}
@@ -179,7 +186,9 @@ should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
break;
- } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed) && !(pcmk_is_set(prereq->flags, pcmk__graph_action_can_fail))) {
+ } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed)
+ && !pcmk_is_set(prereq->flags,
+ pcmk__graph_action_can_fail)) {
crm_trace("Input %d for synapse %d confirmed but failed",
prereq->id, synapse->id);
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
@@ -244,7 +253,7 @@ initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
case pcmk__cluster_graph_action:
if (pcmk__str_eq(crm_element_value(action->xml, XML_LRM_ATTR_TASK),
- CRM_OP_FENCE, pcmk__str_casei)) {
+ PCMK_ACTION_STONITH, pcmk__str_none)) {
crm_trace("Executing fencing action %d (%s)",
action->id, id);
return graph_fns->fence(graph, action);
@@ -255,7 +264,7 @@ initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
default:
crm_err("Unsupported graph action type <%s " XML_ATTR_ID "='%s'> "
"(bug?)",
- crm_element_name(action->xml), id);
+ action->xml->name, id);
return EINVAL;
}
}
@@ -280,7 +289,7 @@ fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
if (rc != pcmk_rc_ok) {
crm_err("Failed initiating <%s " XML_ATTR_ID "=%d> in synapse %d: "
"%s",
- crm_element_name(action->xml), action->id, synapse->id,
+ action->xml->name, action->id, synapse->id,
pcmk_rc_str(rc));
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
pcmk__set_graph_action_flags(action,
@@ -374,7 +383,8 @@ pcmk__execute_graph(pcmk__graph_t *graph)
if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
graph->completed++;
- } else if (!(pcmk_is_set(synapse->flags, pcmk__synapse_failed)) && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
+ } else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed)
+ && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
graph->pending++;
}
}
@@ -396,7 +406,9 @@ pcmk__execute_graph(pcmk__graph_t *graph)
graph->skipped++;
continue;
- } else if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_executed)) {
+ } else if (pcmk_any_flags_set(synapse->flags,
+ pcmk__synapse_confirmed
+ |pcmk__synapse_executed)) {
continue; // Already handled
} else if (should_fire_synapse(graph, synapse)) {
@@ -470,7 +482,6 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
{
enum pcmk__graph_action_type action_type;
pcmk__graph_action_t *action = NULL;
- const char *element = TYPE(xml_action);
const char *value = ID(xml_action);
if (value == NULL) {
@@ -479,20 +490,18 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
return NULL;
}
- if (pcmk__str_eq(element, XML_GRAPH_TAG_RSC_OP, pcmk__str_casei)) {
+ if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_RSC_OP)) {
action_type = pcmk__rsc_graph_action;
- } else if (pcmk__str_eq(element, XML_GRAPH_TAG_PSEUDO_EVENT,
- pcmk__str_casei)) {
+ } else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_PSEUDO_EVENT)) {
action_type = pcmk__pseudo_graph_action;
- } else if (pcmk__str_eq(element, XML_GRAPH_TAG_CRM_EVENT,
- pcmk__str_casei)) {
+ } else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_CRM_EVENT)) {
action_type = pcmk__cluster_graph_action;
} else {
crm_err("Ignoring transition graph action of unknown type '%s' (bug?)",
- element);
+ xml_action->name);
crm_log_xml_trace(xml_action, "invalid");
return NULL;
}
@@ -531,10 +540,9 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
value = g_hash_table_lookup(action->params, "CRM_meta_can_fail");
if (value != NULL) {
+ int can_fail = 0;
- gboolean can_fail = FALSE;
- crm_str_to_boolean(value, &can_fail);
- if (can_fail) {
+ if ((crm_str_to_boolean(value, &can_fail) > 0) && (can_fail > 0)) {
pcmk__set_graph_action_flags(action, pcmk__graph_action_can_fail);
} else {
pcmk__clear_graph_action_flags(action, pcmk__graph_action_can_fail);
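Editor's note on the unpack_action() hunk above: the old gboolean/crm_str_to_boolean() pairing is replaced by a plain int plus an explicit check of the return value, so an unparsable CRM_meta_can_fail now clearly falls through to clearing the flag. A hedged sketch of the same defensive pattern follows; it assumes crm_str_to_boolean() is available from <crm/common/util.h> (header location not confirmed by this diff) and, as the new check relies on, that it returns a positive value only on a successful parse.

    #include <stdio.h>
    #include <crm/common/util.h>   // assumed header for crm_str_to_boolean()

    static void
    check(const char *value)
    {
        int parsed = 0;

        // Treat the flag as set only if parsing succeeded AND the value is true
        if ((crm_str_to_boolean(value, &parsed) > 0) && (parsed > 0)) {
            printf("'%s' -> can fail\n", value);
        } else {
            printf("'%s' -> cannot fail (or unparsable)\n", value);
        }
    }

    int
    main(void)
    {
        check("true");
        check("off");
        check("bananas");   // unparsable: falls through to the else branch
        return 0;
    }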
diff --git a/lib/pacemaker/pcmk_graph_logging.c b/lib/pacemaker/pcmk_graph_logging.c
index b922a3e..f6fc179 100644
--- a/lib/pacemaker/pcmk_graph_logging.c
+++ b/lib/pacemaker/pcmk_graph_logging.c
@@ -68,18 +68,15 @@ find_graph_action_by_id(const pcmk__graph_t *graph, int id)
return NULL;
}
- for (const GList *sIter = graph->synapses; sIter != NULL;
- sIter = sIter->next) {
+ for (const GList *synapse_iter = graph->synapses;
+ synapse_iter != NULL; synapse_iter = synapse_iter->next) {
- const pcmk__graph_synapse_t *synapse = NULL;
+ const pcmk__graph_synapse_t *synapse = synapse_iter->data;
- synapse = (const pcmk__graph_synapse_t *) sIter->data;
- for (const GList *aIter = synapse->actions; aIter != NULL;
- aIter = aIter->next) {
+ for (const GList *action_iter = synapse->actions;
+ action_iter != NULL; action_iter = action_iter->next) {
- const pcmk__graph_action_t *action = NULL;
-
- action = (const pcmk__graph_action_t *) aIter->data;
+ const pcmk__graph_action_t *action = action_iter->data;
if (action->id == id) {
return action;
}
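Editor's note on the find_graph_action_by_id() hunk above: the rewritten loops drop the separate declarations and explicit casts and instead assign the GList payload directly to a typed const pointer inside the loop, which is the idiomatic GLib pattern since gpointer converts implicitly. A small self-contained illustration is below; the demo_action_t type is a stand-in, not the real pcmk__graph_action_t.

    #include <glib.h>
    #include <stdio.h>

    // Stand-in for a graph action; the real type is pcmk__graph_action_t
    typedef struct {
        int id;
    } demo_action_t;

    int
    main(void)
    {
        demo_action_t a = { .id = 1 };
        demo_action_t b = { .id = 2 };
        GList *actions = NULL;

        actions = g_list_append(actions, &a);
        actions = g_list_append(actions, &b);

        // gpointer converts implicitly, so no cast is needed on ->data
        for (const GList *iter = actions; iter != NULL; iter = iter->next) {
            const demo_action_t *action = iter->data;

            printf("action %d\n", action->id);
        }
        g_list_free(actions);
        return 0;
    }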
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index 5484e8b..59b6176 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -24,13 +24,13 @@
// Convenience macros for logging action properties
#define action_type_str(flags) \
- (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
+ (pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
- (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
+ (pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
- (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
+ (pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
@@ -61,46 +61,48 @@ add_node_to_xml_by_id(const char *id, xmlNode *xml)
* \param[in,out] xml XML to add node to
*/
static void
-add_node_to_xml(const pe_node_t *node, void *xml)
+add_node_to_xml(const pcmk_node_t *node, void *xml)
{
add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
* \internal
- * \brief Add XML with nodes that need an update of their maintenance state
+ * \brief Count (optionally add to XML) nodes needing maintenance state update
*
- * \param[in,out] xml Parent XML tag to add to
- * \param[in] data_set Working set for cluster
+ * \param[in,out] xml Parent XML tag to add to, if any
+ * \param[in] scheduler Scheduler data
+ *
+ * \return Count of nodes added
+ * \note Only Pacemaker Remote nodes are considered currently
*/
static int
-add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
+add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
{
- GList *gIter = NULL;
- xmlNode *maintenance =
- xml?create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE):NULL;
+ xmlNode *maintenance = NULL;
int count = 0;
- for (gIter = data_set->nodes; gIter != NULL;
- gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- struct pe_node_shared_s *details = node->details;
+ if (xml != NULL) {
+ maintenance = create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE);
+ }
+ for (const GList *iter = scheduler->nodes;
+ iter != NULL; iter = iter->next) {
+ const pcmk_node_t *node = iter->data;
- if (!pe__is_guest_or_remote_node(node)) {
- continue; /* just remote nodes need to know atm */
- }
+ if (pe__is_guest_or_remote_node(node) &&
+ (node->details->maintenance != node->details->remote_maintenance)) {
- if (details->maintenance != details->remote_maintenance) {
- if (maintenance) {
- crm_xml_add(
- add_node_to_xml_by_id(node->details->id, maintenance),
- XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
+ if (maintenance != NULL) {
+ crm_xml_add(add_node_to_xml_by_id(node->details->id,
+ maintenance),
+ XML_NODE_IS_MAINTENANCE,
+ (node->details->maintenance? "1" : "0"));
}
count++;
}
}
- crm_trace("%s %d nodes to adjust maintenance-mode "
- "to transition", maintenance?"Added":"Counted", count);
+ crm_trace("%s %d nodes in need of maintenance mode update in state",
+ ((maintenance == NULL)? "Counted" : "Added"), count);
return count;
}
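Editor's note on the add_maintenance_nodes() hunk above: the function is now explicitly dual-purpose, so calling it with a NULL parent only counts the Pacemaker Remote nodes whose maintenance flag needs updating, while a real parent also records them in the graph XML. The same "pass NULL to just count" shape, reduced to a self-contained sketch with made-up data, looks like this.

    #include <stdio.h>

    // Stand-ins for the real node list and XML container
    struct demo_node { const char *name; int needs_update; };

    static int
    add_or_count(FILE *out, const struct demo_node *nodes, int n_nodes)
    {
        int count = 0;

        for (int i = 0; i < n_nodes; i++) {
            if (nodes[i].needs_update) {
                if (out != NULL) {          // NULL means "count only"
                    fprintf(out, "<node id='%s'/>\n", nodes[i].name);
                }
                count++;
            }
        }
        return count;
    }

    int
    main(void)
    {
        struct demo_node nodes[] = {
            { "remote-1", 1 }, { "remote-2", 0 }, { "remote-3", 1 },
        };

        // First pass: decide whether any update is needed at all
        if (add_or_count(NULL, nodes, 3) != 0) {
            // Second pass: actually emit the entries
            add_or_count(stdout, nodes, 3);
        }
        return 0;
    }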
@@ -108,17 +110,16 @@ add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
* \internal
* \brief Add pseudo action with nodes needing maintenance state update
*
- * \param[in,out] data_set Working set for cluster
+ * \param[in,out] scheduler Scheduler data
*/
static void
-add_maintenance_update(pe_working_set_t *data_set)
+add_maintenance_update(pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- if (add_maintenance_nodes(NULL, data_set)) {
- crm_trace("adding maintenance state update pseudo action");
- action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
- pe__set_action_flags(action, pe_action_print_always);
+ if (add_maintenance_nodes(NULL, scheduler) != 0) {
+ action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
+ pe__set_action_flags(action, pcmk_action_always_in_graph);
}
}
@@ -132,21 +133,21 @@ add_maintenance_update(pe_working_set_t *data_set)
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] action Action to check for downed nodes
- * \param[in] data_set Working set for cluster
*/
static void
-add_downed_nodes(xmlNode *xml, const pe_action_t *action,
- const pe_working_set_t *data_set)
+add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
{
- CRM_CHECK(xml && action && action->node && data_set, return);
+ CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
+ return);
- if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
/* Shutdown makes the action's node down */
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
/* Fencing makes the action's node and any hosted guest nodes down */
const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
@@ -154,24 +155,28 @@ add_downed_nodes(xmlNode *xml, const pe_action_t *action,
if (pcmk__is_fencing_action(fence)) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
- pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
+ pe_foreach_guest_node(action->node->details->data_set,
+ action->node, add_node_to_xml, downed);
}
} else if (action->rsc && action->rsc->is_remote_node
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
/* Stopping a remote connection resource makes connected node down,
* unless it's part of a migration
*/
GList *iter;
- pe_action_t *input;
- gboolean migrating = FALSE;
+ pcmk_action_t *input;
+ bool migrating = false;
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
- input = ((pe_action_wrapper_t *) iter->data)->action;
- if (input->rsc && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_casei)
- && pcmk__str_eq(input->task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
- migrating = TRUE;
+ input = ((pcmk__related_action_t *) iter->data)->action;
+ if ((input->rsc != NULL)
+ && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none)
+ && pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_none)) {
+ migrating = true;
break;
}
}
@@ -192,9 +197,9 @@ add_downed_nodes(xmlNode *xml, const pe_action_t *action,
* \return Newly allocated string with transition graph operation key
*/
static char *
-clone_op_key(const pe_action_t *action, guint interval_ms)
+clone_op_key(const pcmk_action_t *action, guint interval_ms)
{
- if (pcmk__str_eq(action->task, RSC_NOTIFY, pcmk__str_none)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
const char *n_task = g_hash_table_lookup(action->meta,
"notify_operation");
@@ -218,9 +223,9 @@ clone_op_key(const pe_action_t *action, guint interval_ms)
* \param[in,out] xml Transition graph action XML for \p action
*/
static void
-add_node_details(const pe_action_t *action, xmlNode *xml)
+add_node_details(const pcmk_action_t *action, xmlNode *xml)
{
- pe_node_t *router_node = pcmk__connection_host_for_action(action);
+ pcmk_node_t *router_node = pcmk__connection_host_for_action(action);
crm_xml_add(xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
crm_xml_add(xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
@@ -237,7 +242,7 @@ add_node_details(const pe_action_t *action, xmlNode *xml)
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
-add_resource_details(const pe_action_t *action, xmlNode *action_xml)
+add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *rsc_xml = NULL;
const char *attr_list[] = {
@@ -256,8 +261,9 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
// List affected resource
- rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_orphan)
+ rsc_xml = create_xml_node(action_xml,
+ (const char *) action->rsc->xml->name);
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_removed)
&& (action->rsc->clone_name != NULL)) {
/* Use the numbered instance name here, because if there is more
* than one instance on a node, we need to make sure the command
@@ -272,7 +278,7 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
- } else if (!pcmk_is_set(action->rsc->flags, pe_rsc_unique)) {
+ } else if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_unique)) {
const char *xml_id = ID(action->rsc->xml);
crm_debug("Using anonymous clone name %s for %s (aka %s)",
@@ -319,7 +325,7 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
-add_action_attributes(pe_action_t *action, xmlNode *action_xml)
+add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *args_xml = NULL;
@@ -341,7 +347,8 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
g_hash_table_foreach(params, hash2smartfield, args_xml);
- } else if ((action->rsc != NULL) && (action->rsc->variant <= pe_native)) {
+ } else if ((action->rsc != NULL)
+ && (action->rsc->variant <= pcmk_rsc_variant_primitive)) {
GHashTable *params = pe_rsc_params(action->rsc, NULL,
action->rsc->cluster);
@@ -350,7 +357,7 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
g_hash_table_foreach(action->meta, hash2metafield, args_xml);
if (action->rsc != NULL) {
- pe_resource_t *parent = action->rsc;
+ pcmk_resource_t *parent = action->rsc;
while (parent != NULL) {
parent->cmds->add_graph_meta(parent, args_xml);
@@ -359,7 +366,7 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
pcmk__add_bundle_meta_to_xml(args_xml, action);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_none)
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)
&& (action->node != NULL)) {
/* Pass the node's attributes as meta-attributes.
*
@@ -367,7 +374,8 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
* added in 33d99707, probably for the libfence-based implementation in
* c9a90bd, which is no longer used.
*/
- g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
+ g_hash_table_foreach(action->node->details->attrs, hash2metafield,
+ args_xml);
}
sorted_xml(args_xml, action_xml, FALSE);
@@ -381,41 +389,43 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
* \param[in,out] parent Parent XML element to add action to
* \param[in,out] action Scheduled action
* \param[in] skip_details If false, add action details as sub-elements
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
-create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
- const pe_working_set_t *data_set)
+create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
+ const pcmk_scheduler_t *scheduler)
{
bool needs_node_info = true;
bool needs_maintenance_info = false;
xmlNode *action_xml = NULL;
- if ((action == NULL) || (data_set == NULL)) {
+ if ((action == NULL) || (scheduler == NULL)) {
return;
}
// Create the top-level element based on task
- if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
/* All fences need node info; guest node fences are pseudo-events */
- action_xml = create_xml_node(parent,
- pcmk_is_set(action->flags, pe_action_pseudo)?
- XML_GRAPH_TAG_PSEUDO_EVENT :
- XML_GRAPH_TAG_CRM_EVENT);
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
+ action_xml = create_xml_node(parent, XML_GRAPH_TAG_PSEUDO_EVENT);
+ } else {
+ action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
+ }
} else if (pcmk__str_any_of(action->task,
- CRM_OP_SHUTDOWN,
- CRM_OP_CLEAR_FAILCOUNT, NULL)) {
+ PCMK_ACTION_DO_SHUTDOWN,
+ PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_none)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
+ pcmk__str_none)) {
// CIB-only clean-up for shutdown locks
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
- } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
- if (pcmk__str_eq(action->task, CRM_OP_MAINTENANCE_NODES,
+ } else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
pcmk__str_none)) {
needs_maintenance_info = true;
}
@@ -439,7 +449,8 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
}
clone_key = clone_op_key(action, interval_ms);
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
- crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
+ crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY,
+ action->uuid);
free(clone_key);
} else {
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
@@ -458,7 +469,7 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
}
if ((action->rsc != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)) {
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)) {
// This is a real resource action, so add resource details
add_resource_details(action, action_xml);
@@ -469,11 +480,11 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
/* List any nodes this action is expected to make down */
if (needs_node_info && (action->node != NULL)) {
- add_downed_nodes(action_xml, action, data_set);
+ add_downed_nodes(action_xml, action);
}
if (needs_maintenance_info) {
- add_maintenance_nodes(action_xml, data_set);
+ add_maintenance_nodes(action_xml, scheduler);
}
}
@@ -486,16 +497,16 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
* \return true if action should be added to graph, otherwise false
*/
static bool
-should_add_action_to_graph(const pe_action_t *action)
+should_add_action_to_graph(const pcmk_action_t *action)
{
- if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring action %s (%d): unrunnable",
action->uuid, action->id);
return false;
}
- if (pcmk_is_set(action->flags, pe_action_optional)
- && !pcmk_is_set(action->flags, pe_action_print_always)) {
+ if (pcmk_is_set(action->flags, pcmk_action_optional)
+ && !pcmk_is_set(action->flags, pcmk_action_always_in_graph)) {
crm_trace("Ignoring action %s (%d): optional",
action->uuid, action->id);
return false;
@@ -505,8 +516,9 @@ should_add_action_to_graph(const pe_action_t *action)
* with the exception of monitors and cancellation of recurring monitors.
*/
if ((action->rsc != NULL)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && !pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_none)) {
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
+
const char *interval_ms_s;
/* A cancellation of a recurring monitor will get here because the task
@@ -526,21 +538,21 @@ should_add_action_to_graph(const pe_action_t *action)
/* Always add pseudo-actions, fence actions, and shutdown actions (already
* determined to be required and runnable by this point)
*/
- if (pcmk_is_set(action->flags, pe_action_pseudo)
- || pcmk__strcase_any_of(action->task, CRM_OP_FENCE, CRM_OP_SHUTDOWN,
- NULL)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)
+ || pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
+ PCMK_ACTION_DO_SHUTDOWN, NULL)) {
return true;
}
if (action->node == NULL) {
pe_err("Skipping action %s (%d) "
- "because it was not allocated to a node (bug?)",
+ "because it was not assigned to a node (bug?)",
action->uuid, action->id);
- pcmk__log_action("Unallocated", action, false);
+ pcmk__log_action("Unassigned", action, false);
return false;
}
- if (pcmk_is_set(action->flags, pe_action_dc)) {
+ if (pcmk_is_set(action->flags, pcmk_action_on_dc)) {
crm_trace("Action %s (%d) should be dumped: "
"can run on DC instead of %s",
action->uuid, action->id, pe__node_name(action->node));
@@ -577,11 +589,12 @@ should_add_action_to_graph(const pe_action_t *action)
* \return true if ordering has flags that can change an action, false otherwise
*/
static bool
-ordering_can_change_actions(const pe_action_wrapper_t *ordering)
+ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
- return pcmk_any_flags_set(ordering->type, ~(pe_order_implies_first_printed
- |pe_order_implies_then_printed
- |pe_order_optional));
+ return pcmk_any_flags_set(ordering->type,
+ ~(pcmk__ar_then_implies_first_graphed
+ |pcmk__ar_first_implies_then_graphed
+ |pcmk__ar_ordered));
}
/*!
@@ -596,20 +609,21 @@ ordering_can_change_actions(const pe_action_wrapper_t *ordering)
* circumstances (load or anti-colocation orderings that are not needed).
*/
static bool
-should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
+should_add_input_to_graph(const pcmk_action_t *action,
+ pcmk__related_action_t *input)
{
if (input->state == pe_link_dumped) {
return true;
}
- if (input->type == pe_order_none) {
+ if ((uint32_t) input->type == pcmk__ar_none) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering disabled",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
+ } else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& !ordering_can_change_actions(input)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional and input unrunnable",
@@ -617,32 +631,32 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, input->action->id);
return false;
- } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
- && pcmk_is_set(input->type, pe_order_one_or_more)) {
+ } else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
+ && pcmk_is_set(input->type, pcmk__ar_min_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "one-or-more and input unrunnable",
+ "minimum number of instances required but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (pcmk_is_set(input->type, pe_order_implies_first_migratable)
- && !pcmk_is_set(input->action->flags, pe_action_runnable)) {
+ } else if (pcmk_is_set(input->type, pcmk__ar_unmigratable_then_blocks)
+ && !pcmk_is_set(input->action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "implies input migratable but input unrunnable",
+ "input blocked if 'then' unmigratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (pcmk_is_set(input->type, pe_order_apply_first_non_migratable)
- && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)) {
- crm_trace("Ignoring %s (%d) input %s (%d): "
- "only if input unmigratable but input unrunnable",
+ } else if (pcmk_is_set(input->type, pcmk__ar_if_first_unmigratable)
+ && pcmk_is_set(input->action->flags, pcmk_action_migratable)) {
+ crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
+ "only if input is unmigratable, but it is migratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if ((input->type == pe_order_optional)
- && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)
+ } else if (((uint32_t) input->type == pcmk__ar_ordered)
+ && pcmk_is_set(input->action->flags, pcmk_action_migratable)
&& pcmk__ends_with(input->action->uuid, "_stop_0")) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional but stop in migration",
@@ -650,74 +664,73 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, input->action->id);
return false;
- } else if (input->type == pe_order_load) {
- pe_node_t *input_node = input->action->node;
+ } else if ((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target) {
+ pcmk_node_t *input_node = input->action->node;
- // load orderings are relevant only if actions are for same node
+ if ((action->rsc != NULL)
+ && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_none)) {
- if (action->rsc && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)) {
- pe_node_t *allocated = action->rsc->allocated_to;
+ pcmk_node_t *assigned = action->rsc->allocated_to;
- /* For load_stopped -> migrate_to orderings, we care about where it
- * has been allocated to, not where it will be executed.
+ /* For load_stopped -> migrate_to orderings, we care about where
+ * the resource has been assigned, not where migrate_to will be
+ * executed.
*/
- if ((input_node == NULL) || (allocated == NULL)
- || (input_node->details != allocated->details)) {
+ if (!pe__same_node(input_node, assigned)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering node mismatch %s vs %s",
+ "migration target %s is not same as input node %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
- (allocated? allocated->details->uname : "<none>"),
+ (assigned? assigned->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
- } else if ((input_node == NULL) || (action->node == NULL)
- || (input_node->details != action->node->details)) {
+ } else if (!pe__same_node(input_node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering node mismatch %s vs %s",
+ "not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
(action->node? action->node->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering input optional",
+ "ordering optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
- } else if (input->type == pe_order_anti_colocation) {
+ } else if ((uint32_t) input->type == pcmk__ar_if_required_on_same_node) {
if (input->action->node && action->node
- && (input->action->node->details != action->node->details)) {
+ && !pe__same_node(input->action->node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "anti-colocation node mismatch %s vs %s",
+ "not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
pe__node_name(action->node),
pe__node_name(input->action->node));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
- crm_trace("Ignoring %s (%d) input %s (%d): "
- "anti-colocation input optional",
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
+ crm_trace("Ignoring %s (%d) input %s (%d): optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (input->action->rsc
&& input->action->rsc != action->rsc
- && pcmk_is_set(input->action->rsc->flags, pe_rsc_failed)
- && !pcmk_is_set(input->action->rsc->flags, pe_rsc_managed)
+ && pcmk_is_set(input->action->rsc->flags, pcmk_rsc_failed)
+ && !pcmk_is_set(input->action->rsc->flags, pcmk_rsc_managed)
&& pcmk__ends_with(input->action->uuid, "_stop_0")
&& action->rsc && pe_rsc_is_clone(action->rsc)) {
crm_warn("Ignoring requirement that %s complete before %s:"
@@ -725,9 +738,10 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, action->uuid);
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)
&& !pcmk_any_flags_set(input->action->flags,
- pe_action_print_always|pe_action_dumped)
+ pcmk_action_always_in_graph
+ |pcmk_action_added_to_graph)
&& !should_add_action_to_graph(input->action)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input optional",
@@ -758,12 +772,12 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
* \return true if the ordering creates a loop, otherwise false
*/
bool
-pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
- pe_action_wrapper_t *input)
+pcmk__graph_has_loop(const pcmk_action_t *init_action,
+ const pcmk_action_t *action, pcmk__related_action_t *input)
{
bool has_loop = false;
- if (pcmk_is_set(input->action->flags, pe_action_tracking)) {
+ if (pcmk_is_set(input->action->flags, pcmk_action_detect_loop)) {
crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
@@ -787,7 +801,7 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
return true;
}
- pe__set_action_flags(input->action, pe_action_tracking);
+ pe__set_action_flags(input->action, pcmk_action_detect_loop);
crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
"for graph loop with %s@%s ",
@@ -804,14 +818,14 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
iter != NULL; iter = iter->next) {
if (pcmk__graph_has_loop(init_action, input->action,
- (pe_action_wrapper_t *) iter->data)) {
+ (pcmk__related_action_t *) iter->data)) {
// Recursive call already logged a debug message
has_loop = true;
break;
}
}
- pe__clear_action_flags(input->action, pe_action_tracking);
+ pe__clear_action_flags(input->action, pcmk_action_detect_loop);
if (!has_loop) {
crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
@@ -828,19 +842,19 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
* \internal
* \brief Create a synapse XML element for a transition graph
*
- * \param[in] action Action that synapse is for
- * \param[in,out] data_set Cluster working set containing graph
+ * \param[in] action Action that synapse is for
+ * \param[in,out] scheduler Scheduler data containing graph
*
* \return Newly added XML element for new graph synapse
*/
static xmlNode *
-create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
+create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
{
int synapse_priority = 0;
- xmlNode *syn = create_xml_node(data_set->graph, "synapse");
+ xmlNode *syn = create_xml_node(scheduler->graph, "synapse");
- crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
- data_set->num_synapse++;
+ crm_xml_add_int(syn, XML_ATTR_ID, scheduler->num_synapse);
+ scheduler->num_synapse++;
if (action->rsc != NULL) {
synapse_priority = action->rsc->priority;
@@ -859,10 +873,10 @@ create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
* \brief Add an action to the transition graph XML if appropriate
*
* \param[in,out] data Action to possibly add
- * \param[in,out] user_data Cluster working set
+ * \param[in,out] user_data Scheduler data
*
* \note This will de-duplicate the action inputs, meaning that the
- * pe_action_wrapper_t:type flags can no longer be relied on to retain
+ * pcmk__related_action_t:type flags can no longer be relied on to retain
* their original settings. That means this MUST be called after
* pcmk__apply_orderings() is complete, and nothing after this should rely
* on those type flags. (For example, some code looks for type equal to
@@ -873,8 +887,8 @@ create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
static void
add_action_to_graph(gpointer data, gpointer user_data)
{
- pe_action_t *action = (pe_action_t *) data;
- pe_working_set_t *data_set = (pe_working_set_t *) user_data;
+ pcmk_action_t *action = (pcmk_action_t *) data;
+ pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data;
xmlNode *syn = NULL;
xmlNode *set = NULL;
@@ -884,36 +898,36 @@ add_action_to_graph(gpointer data, gpointer user_data)
* the action to the graph, so that crm_simulate's dot graphs don't have
* duplicates).
*/
- if (!pcmk_is_set(action->flags, pe_action_dedup)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_inputs_deduplicated)) {
pcmk__deduplicate_action_inputs(action);
- pe__set_action_flags(action, pe_action_dedup);
+ pe__set_action_flags(action, pcmk_action_inputs_deduplicated);
}
- if (pcmk_is_set(action->flags, pe_action_dumped) // Already added, or
- || !should_add_action_to_graph(action)) { // shouldn't be added
- return;
+ if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)
+ || !should_add_action_to_graph(action)) {
+ return; // Already added, or shouldn't be
}
- pe__set_action_flags(action, pe_action_dumped);
+ pe__set_action_flags(action, pcmk_action_added_to_graph);
crm_trace("Adding action %d (%s%s%s) to graph",
action->id, action->uuid,
((action->node == NULL)? "" : " on "),
((action->node == NULL)? "" : action->node->details->uname));
- syn = create_graph_synapse(action, data_set);
+ syn = create_graph_synapse(action, scheduler);
set = create_xml_node(syn, "action_set");
in = create_xml_node(syn, "inputs");
- create_graph_action(set, action, false, data_set);
+ create_graph_action(set, action, false, scheduler);
for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *input = (pe_action_wrapper_t *) lpc->data;
+ pcmk__related_action_t *input = lpc->data;
if (should_add_input_to_graph(action, input)) {
xmlNode *input_xml = create_xml_node(in, "trigger");
input->state = pe_link_dumped;
- create_graph_action(input_xml, input->action, true, data_set);
+ create_graph_action(input_xml, input->action, true, scheduler);
}
}
}
@@ -960,7 +974,7 @@ pcmk__log_transition_summary(const char *filename)
* \param[in,out] rsc Resource whose actions should be added
*/
void
-pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
+pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc)
{
GList *iter = NULL;
@@ -972,7 +986,7 @@ pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
// Then recursively add its children's actions (appropriate to variant)
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
@@ -982,10 +996,10 @@ pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
* \internal
* \brief Create a transition graph with all cluster actions needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__create_graph(pe_working_set_t *data_set)
+pcmk__create_graph(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
const char *value = NULL;
@@ -994,38 +1008,38 @@ pcmk__create_graph(pe_working_set_t *data_set)
transition_id++;
crm_trace("Creating transition graph %d", transition_id);
- data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
+ scheduler->graph = create_xml_node(NULL, XML_TAG_GRAPH);
- value = pe_pref(data_set->config_hash, "cluster-delay");
- crm_xml_add(data_set->graph, "cluster-delay", value);
+ value = pe_pref(scheduler->config_hash, "cluster-delay");
+ crm_xml_add(scheduler->graph, "cluster-delay", value);
- value = pe_pref(data_set->config_hash, "stonith-timeout");
- crm_xml_add(data_set->graph, "stonith-timeout", value);
+ value = pe_pref(scheduler->config_hash, "stonith-timeout");
+ crm_xml_add(scheduler->graph, "stonith-timeout", value);
- crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
+ crm_xml_add(scheduler->graph, "failed-stop-offset", "INFINITY");
- if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
- crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
+ crm_xml_add(scheduler->graph, "failed-start-offset", "INFINITY");
} else {
- crm_xml_add(data_set->graph, "failed-start-offset", "1");
+ crm_xml_add(scheduler->graph, "failed-start-offset", "1");
}
- value = pe_pref(data_set->config_hash, "batch-limit");
- crm_xml_add(data_set->graph, "batch-limit", value);
+ value = pe_pref(scheduler->config_hash, "batch-limit");
+ crm_xml_add(scheduler->graph, "batch-limit", value);
- crm_xml_add_int(data_set->graph, "transition_id", transition_id);
+ crm_xml_add_int(scheduler->graph, "transition_id", transition_id);
- value = pe_pref(data_set->config_hash, "migration-limit");
+ value = pe_pref(scheduler->config_hash, "migration-limit");
if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
- crm_xml_add(data_set->graph, "migration-limit", value);
+ crm_xml_add(scheduler->graph, "migration-limit", value);
}
- if (data_set->recheck_by > 0) {
+ if (scheduler->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
- (long long) data_set->recheck_by);
- crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
+ (long long) scheduler->recheck_by);
+ crm_xml_add(scheduler->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
@@ -1035,44 +1049,48 @@ pcmk__create_graph(pe_working_set_t *data_set)
*/
// Add resource actions to graph
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
pe_rsc_trace(rsc, "Processing actions for %s", rsc->id);
rsc->cmds->add_actions_to_graph(rsc);
}
// Add pseudo-action for list of nodes with maintenance state update
- add_maintenance_update(data_set);
+ add_maintenance_update(scheduler);
// Add non-resource (node) actions
- for (iter = data_set->actions; iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
if ((action->rsc != NULL)
&& (action->node != NULL)
&& action->node->details->shutdown
- && !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
- pe_action_optional|pe_action_runnable)
- && pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)) {
+ pcmk_action_optional|pcmk_action_runnable)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* Eventually we should just ignore the 'fence' case, but for now
* it's the best way to detect (in CTS) when CIB resource updates
* are being lost.
*/
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
- || (data_set->no_quorum_policy == no_quorum_ignore)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
+ || (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
+ const bool managed = pcmk_is_set(action->rsc->flags,
+ pcmk_rsc_managed);
+ const bool failed = pcmk_is_set(action->rsc->flags,
+ pcmk_rsc_failed);
+
crm_crit("Cannot %s %s because of %s:%s%s (%s)",
action->node->details->unclean? "fence" : "shut down",
pe__node_name(action->node), action->rsc->id,
- pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
- pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
- action->uuid);
+ (managed? " blocked" : " unmanaged"),
+ (failed? " failed" : ""), action->uuid);
}
}
- add_action_to_graph((gpointer) action, (gpointer) data_set);
+ add_action_to_graph((gpointer) action, (gpointer) scheduler);
}
- crm_log_xml_trace(data_set->graph, "graph");
+ crm_log_xml_trace(scheduler->graph, "graph");
}
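
The pcmk_graph_producer.c hunks above are largely mechanical renames of scheduler-facing types and flags (pe_action_t to pcmk_action_t, pe_working_set_t to pcmk_scheduler_t, pe_action_tracking to pcmk_action_detect_loop, and so on) plus formatting cleanups; the graph-building logic itself is unchanged. The loop check in pcmk__graph_has_loop() sets a per-action flag, recurses over each input's own inputs, and clears the flag on the way back out. A rough standalone sketch of that flag-guarded walk follows; it uses hypothetical demo types rather than the Pacemaker API and omits the init_action bookkeeping and early-exit cases the real function keeps, so it is only an illustration of the pattern, not the actual implementation.

    /* Sketch only: flag-guarded depth-first walk over action inputs.
     * A node already carrying the flag is on the current path, so the
     * walk reports a loop and unwinds. Hypothetical types throughout.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_INPUTS 4
    #define FLAG_DETECT_LOOP 0x1u

    struct demo_action {
        const char *uuid;
        unsigned int flags;
        struct demo_action *inputs[MAX_INPUTS];  // actions ordered before this one
        size_t n_inputs;
    };

    static bool
    demo_has_loop(struct demo_action *action)
    {
        bool has_loop = false;

        if (action->flags & FLAG_DETECT_LOOP) {
            return true;                    // already on the current path
        }
        action->flags |= FLAG_DETECT_LOOP;  // mark as being visited
        for (size_t i = 0; i < action->n_inputs; i++) {
            if (demo_has_loop(action->inputs[i])) {
                has_loop = true;
                break;
            }
        }
        action->flags &= ~FLAG_DETECT_LOOP; // unmark before returning
        return has_loop;
    }

    int
    main(void)
    {
        struct demo_action start = { "rsc_start_0", 0, { NULL }, 0 };
        struct demo_action stop = { "rsc_stop_0", 0, { &start }, 1 };

        start.inputs[0] = &stop;            // create an artificial ordering cycle
        start.n_inputs = 1;
        printf("loop detected: %s\n", demo_has_loop(&start) ? "yes" : "no");
        return 0;
    }
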
diff --git a/lib/pacemaker/pcmk_injections.c b/lib/pacemaker/pcmk_injections.c
index ea8fc17..f6b36e8 100644
--- a/lib/pacemaker/pcmk_injections.c
+++ b/lib/pacemaker/pcmk_injections.c
@@ -19,12 +19,12 @@
#include <dirent.h>
#include <crm/crm.h>
-#include <crm/lrmd.h> // lrmd_event_data_t, lrmd_free_event()
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/util.h>
#include <crm/common/iso8601.h>
#include <crm/common/xml_internal.h>
+#include <crm/lrmd_events.h> // lrmd_event_data_t, etc.
#include <crm/lrmd_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -35,6 +35,7 @@ bool pcmk__simulate_node_config = false;
#define XPATH_NODE_CONFIG "//" XML_CIB_TAG_NODE "[@" XML_ATTR_UNAME "='%s']"
#define XPATH_NODE_STATE "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']"
+#define XPATH_NODE_STATE_BY_ID "//" XML_CIB_TAG_STATE "[@" XML_ATTR_ID "='%s']"
#define XPATH_RSC_HISTORY XPATH_NODE_STATE \
"//" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"
@@ -249,7 +250,7 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
}
if (found_uuid) {
- char *xpath_by_uuid = crm_strdup_printf("//" XML_CIB_TAG_STATE "[@" XML_ATTR_ID "='%s']",
+ char *xpath_by_uuid = crm_strdup_printf(XPATH_NODE_STATE_BY_ID,
found_uuid);
// It's possible that a node_state entry doesn't have an uname yet.
@@ -257,8 +258,8 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
cib_xpath|cib_sync_call|cib_scope_local);
if ((cib_object != NULL) && (ID(cib_object) == NULL)) {
- crm_err("Detected multiple node_state entries for xpath=%s, bailing",
- xpath_by_uuid);
+ crm_err("Can't inject node state for %s because multiple "
+ "state entries found for ID %s", node, found_uuid);
duplicate = true;
free(xpath_by_uuid);
goto done;
@@ -266,7 +267,8 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
} else if (cib_object != NULL) {
crm_xml_add(cib_object, XML_ATTR_UNAME, node);
- rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_STATUS, cib_object,
+ rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_STATUS,
+ cib_object,
cib_sync_call|cib_scope_local);
}
@@ -318,17 +320,17 @@ pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up)
if (up) {
pcmk__xe_set_props(cib_node,
- XML_NODE_IN_CLUSTER, XML_BOOLEAN_YES,
- XML_NODE_IS_PEER, ONLINESTATUS,
- XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER,
- XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER,
+ PCMK__XA_IN_CCM, XML_BOOLEAN_YES,
+ PCMK__XA_CRMD, ONLINESTATUS,
+ PCMK__XA_JOIN, CRMD_JOINSTATE_MEMBER,
+ PCMK__XA_EXPECTED, CRMD_JOINSTATE_MEMBER,
NULL);
} else {
pcmk__xe_set_props(cib_node,
- XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO,
- XML_NODE_IS_PEER, OFFLINESTATUS,
- XML_NODE_JOIN_STATE, CRMD_JOINSTATE_DOWN,
- XML_NODE_EXPECTED, CRMD_JOINSTATE_DOWN,
+ PCMK__XA_IN_CCM, XML_BOOLEAN_NO,
+ PCMK__XA_CRMD, OFFLINESTATUS,
+ PCMK__XA_JOIN, CRMD_JOINSTATE_DOWN,
+ PCMK__XA_EXPECTED, CRMD_JOINSTATE_DOWN,
NULL);
}
crm_xml_add(cib_node, XML_ATTR_ORIGIN, crm_system_name);
@@ -400,8 +402,10 @@ pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
if ((rclass == NULL) || (rtype == NULL)) {
// @TODO query configuration for class, provider, type
- out->err(out, "Resource %s not found in the status section of %s."
- " Please supply the class and type to continue", resource, ID(cib_node));
+ out->err(out,
+ "Resource %s not found in the status section of %s "
+ "(supply class and type to continue)",
+ resource, ID(cib_node));
return NULL;
} else if (!pcmk__strcase_any_of(rclass,
@@ -479,7 +483,7 @@ find_ticket_state(pcmk__output_t *out, cib_t *the_cib, const char *ticket_id,
}
crm_log_xml_debug(xml_search, "Match");
- if (xml_has_children(xml_search) && (ticket_id != NULL)) {
+ if ((xml_search->children != NULL) && (ticket_id != NULL)) {
out->err(out, "Multiple ticket_states match ticket_id=%s", ticket_id);
}
*ticket_state_xml = xml_search;
@@ -548,11 +552,11 @@ set_ticket_state_attr(pcmk__output_t *out, const char *ticket_id,
* \param[in,out] out Output object for displaying error messages
* \param[in] spec Action specification to inject
* \param[in,out] cib CIB object for scheduler input
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
int rc;
int outcome = PCMK_OCF_OK;
@@ -570,7 +574,7 @@ inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
xmlNode *cib_op = NULL;
xmlNode *cib_node = NULL;
xmlNode *cib_resource = NULL;
- const pe_resource_t *rsc = NULL;
+ const pcmk_resource_t *rsc = NULL;
lrmd_event_data_t *op = NULL;
out->message(out, "inject-spec", spec);
@@ -586,7 +590,7 @@ inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
parse_op_key(key, &resource, &task, &interval_ms);
- rsc = pe_find_resource(data_set->resources, resource);
+ rsc = pe_find_resource(scheduler->resources, resource);
if (rsc == NULL) {
out->err(out, "Invalid resource name: %s", resource);
goto done;
@@ -627,18 +631,18 @@ done:
* \internal
* \brief Inject fictitious scheduler inputs
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in,out] cib CIB object for scheduler input to modify
* \param[in] injections Injections to apply
*/
void
-pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
+pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
const pcmk_injections_t *injections)
{
int rc = pcmk_ok;
const GList *iter = NULL;
xmlNode *cib_node = NULL;
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
out->message(out, "inject-modify-config", injections->quorum,
injections->watchdog);
@@ -654,9 +658,9 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
if (injections->watchdog != NULL) {
rc = cib__update_node_attr(out, cib, cib_sync_call|cib_scope_local,
- XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
- XML_ATTR_HAVE_WATCHDOG, injections->watchdog,
- NULL, NULL);
+ XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL,
+ NULL, XML_ATTR_HAVE_WATCHDOG,
+ injections->watchdog, NULL, NULL);
CRM_ASSERT(rc == pcmk_rc_ok);
}
@@ -707,7 +711,7 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
out->message(out, "inject-modify-node", "Failing", node);
cib_node = pcmk__inject_node_state_change(cib, node, true);
- crm_xml_add(cib_node, XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO);
+ crm_xml_add(cib_node, PCMK__XA_IN_CCM, XML_BOOLEAN_NO);
CRM_ASSERT(cib_node != NULL);
rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
@@ -753,7 +757,7 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
}
for (iter = injections->op_inject; iter != NULL; iter = iter->next) {
- inject_action(out, (const char *) iter->data, cib, data_set);
+ inject_action(out, (const char *) iter->data, cib, scheduler);
}
if (!out->is_quiet(out)) {
diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
index 7379516..85001da 100644
--- a/lib/pacemaker/pcmk_output.c
+++ b/lib/pacemaker/pcmk_output.c
@@ -21,11 +21,11 @@
#include <stdint.h>
static char *
-colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons,
+colocations_header(pcmk_resource_t *rsc, pcmk__colocation_t *cons,
bool dependents) {
char *retval = NULL;
- if (cons->primary_role > RSC_ROLE_STARTED) {
+ if (cons->primary_role > pcmk_role_started) {
retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)",
rsc->id, pcmk_readable_score(cons->score),
(dependents? "needs" : "with"),
@@ -39,7 +39,7 @@ colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons,
}
static void
-colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc,
+colocations_xml_node(pcmk__output_t *out, pcmk_resource_t *rsc,
pcmk__colocation_t *cons) {
xmlNodePtr node = NULL;
@@ -47,26 +47,29 @@ colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc,
"id", cons->id,
"rsc", cons->dependent->id,
"with-rsc", cons->primary->id,
- "score", pcmk_readable_score(cons->score),
+ "score",
+ pcmk_readable_score(cons->score),
NULL);
if (cons->node_attribute) {
- xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute);
+ xmlSetProp(node, (pcmkXmlStr) "node-attribute",
+ (pcmkXmlStr) cons->node_attribute);
}
- if (cons->dependent_role != RSC_ROLE_UNKNOWN) {
+ if (cons->dependent_role != pcmk_role_unknown) {
xmlSetProp(node, (pcmkXmlStr) "rsc-role",
(pcmkXmlStr) role2text(cons->dependent_role));
}
- if (cons->primary_role != RSC_ROLE_UNKNOWN) {
+ if (cons->primary_role != pcmk_role_unknown) {
xmlSetProp(node, (pcmkXmlStr) "with-rsc-role",
(pcmkXmlStr) role2text(cons->primary_role));
}
}
static int
-do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
+do_locations_list_xml(pcmk__output_t *out, pcmk_resource_t *rsc,
+ bool add_header)
{
GList *lpc = NULL;
GList *list = rsc->rsc_location;
@@ -78,7 +81,7 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_node_t *node = (pe_node_t *) lpc2->data;
+ pcmk_node_t *node = (pcmk_node_t *) lpc2->data;
if (add_header) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "locations");
@@ -88,7 +91,8 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
"node", node->details->uname,
"rsc", rsc->id,
"id", cons->id,
- "score", pcmk_readable_score(node->weight),
+ "score",
+ pcmk_readable_score(node->weight),
NULL);
}
}
@@ -100,18 +104,18 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
return rc;
}
-PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *",
- "pe_node_t *", "pe_node_t *", "pe_action_t *",
- "pe_action_t *")
+PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *",
+ "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *",
+ "pcmk_action_t *")
static int
rsc_action_item(pcmk__output_t *out, va_list args)
{
const char *change = va_arg(args, const char *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *origin = va_arg(args, pe_node_t *);
- pe_node_t *destination = va_arg(args, pe_node_t *);
- pe_action_t *action = va_arg(args, pe_action_t *);
- pe_action_t *source = va_arg(args, pe_action_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *origin = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *destination = va_arg(args, pcmk_node_t *);
+ pcmk_action_t *action = va_arg(args, pcmk_action_t *);
+ pcmk_action_t *source = va_arg(args, pcmk_action_t *);
int len = 0;
char *reason = NULL;
@@ -126,25 +130,25 @@ rsc_action_item(pcmk__output_t *out, va_list args)
CRM_ASSERT(action);
CRM_ASSERT(destination != NULL || origin != NULL);
- if(source == NULL) {
+ if (source == NULL) {
source = action;
}
len = strlen(rsc->id);
- if(len > rsc_width) {
+ if (len > rsc_width) {
rsc_width = len + 2;
}
- if ((rsc->role > RSC_ROLE_STARTED)
- || (rsc->next_role > RSC_ROLE_UNPROMOTED)) {
+ if ((rsc->role > pcmk_role_started)
+ || (rsc->next_role > pcmk_role_unpromoted)) {
need_role = true;
}
- if(origin != NULL && destination != NULL && origin->details == destination->details) {
+ if (pe__same_node(origin, destination)) {
same_host = true;
}
- if(rsc->role == rsc->next_role) {
+ if (rsc->role == rsc->next_role) {
same_role = true;
}
@@ -202,41 +206,43 @@ rsc_action_item(pcmk__output_t *out, va_list args)
}
len = strlen(details);
- if(len > detail_width) {
+ if (len > detail_width) {
detail_width = len;
}
- if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
+ if ((source->reason != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_runnable)) {
reason = crm_strdup_printf("due to %s (blocked)", source->reason);
- } else if(source->reason) {
+ } else if (source->reason) {
reason = crm_strdup_printf("due to %s", source->reason);
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
reason = strdup("blocked");
}
- out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s", change, rsc_width,
- rsc->id, detail_width, details, reason ? " " : "", reason ? reason : "");
+ out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s",
+ change, rsc_width, rsc->id, detail_width, details,
+ ((reason == NULL)? "" : " "), pcmk__s(reason, ""));
free(details);
free(reason);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *",
- "pe_node_t *", "pe_node_t *", "pe_action_t *",
- "pe_action_t *")
+PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *",
+ "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *",
+ "pcmk_action_t *")
static int
rsc_action_item_xml(pcmk__output_t *out, va_list args)
{
const char *change = va_arg(args, const char *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *origin = va_arg(args, pe_node_t *);
- pe_node_t *destination = va_arg(args, pe_node_t *);
- pe_action_t *action = va_arg(args, pe_action_t *);
- pe_action_t *source = va_arg(args, pe_action_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *origin = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *destination = va_arg(args, pcmk_node_t *);
+ pcmk_action_t *action = va_arg(args, pcmk_action_t *);
+ pcmk_action_t *source = va_arg(args, pcmk_action_t *);
char *change_str = NULL;
@@ -252,16 +258,16 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
source = action;
}
- if ((rsc->role > RSC_ROLE_STARTED)
- || (rsc->next_role > RSC_ROLE_UNPROMOTED)) {
+ if ((rsc->role > pcmk_role_started)
+ || (rsc->next_role > pcmk_role_unpromoted)) {
need_role = true;
}
- if(origin != NULL && destination != NULL && origin->details == destination->details) {
+ if (pe__same_node(origin, destination)) {
same_host = true;
}
- if(rsc->role == rsc->next_role) {
+ if (rsc->role == rsc->next_role) {
same_role = true;
}
@@ -339,16 +345,17 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
NULL);
}
- if (source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
+ if ((source->reason != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_runnable)) {
pcmk__xe_set_props(xml,
"reason", source->reason,
"blocked", "true",
NULL);
- } else if(source->reason) {
+ } else if (source->reason != NULL) {
crm_xml_add(xml, "reason", source->reason);
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
pcmk__xe_set_bool_attr(xml, "blocked", true);
}
@@ -356,29 +363,30 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
/* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons
* directly rather than rsc->cmds->this_with_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
- PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources %s is colocated with", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc,
+ "Resources %s is colocated with", rsc->id);
- if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) {
out->list_item(out, NULL, "%s (id=%s - loop)",
cons->primary->id, cons->id);
continue;
@@ -388,7 +396,7 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
out->list_item(out, NULL, "%s", hdr);
free(hdr);
- /* Empty list header just for indentation of information about this resource. */
+ // Empty list header for indentation of information about this resource
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->primary);
@@ -404,26 +412,26 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
/* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons
* directly rather than rsc->cmds->this_with_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
- if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) {
colocations_xml_node(out, cons->primary, cons);
continue;
}
@@ -440,15 +448,15 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
@@ -456,14 +464,15 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
* rsc->rsc_cons_lhs directly rather than
* rsc->cmds->with_this_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
- PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s",
+ rsc->id);
- if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) {
out->list_item(out, NULL, "%s (id=%s - loop)",
cons->dependent->id, cons->id);
continue;
@@ -473,7 +482,7 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
out->list_item(out, NULL, "%s", hdr);
free(hdr);
- /* Empty list header just for indentation of information about this resource. */
+ // Empty list header for indentation of information about this resource
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->dependent);
@@ -489,15 +498,15 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
@@ -505,11 +514,11 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
* rsc->rsc_cons_lhs directly rather than
* rsc->cmds->with_this_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
- if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) {
colocations_xml_node(out, cons->dependent, cons);
continue;
}
@@ -526,10 +535,10 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
+PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *")
static int
locations_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *lpc = NULL;
GList *list = rsc->rsc_location;
@@ -541,7 +550,7 @@ locations_list(pcmk__output_t *out, va_list args) {
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_node_t *node = (pe_node_t *) lpc2->data;
+ pcmk_node_t *node = (pcmk_node_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Locations");
out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)",
@@ -555,24 +564,23 @@ locations_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
+PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *")
static int
locations_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
return do_locations_list_xml(out, rsc, true);
}
-PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *",
- "pe_working_set_t *", "bool", "bool")
+PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *",
+ "bool", "bool")
static int
locations_and_colocations(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
bool force = va_arg(args, int);
- pcmk__unpack_constraints(data_set);
+ pcmk__unpack_constraints(rsc->cluster);
// Constraints apply to group/clone, not member/instance
if (!force) {
@@ -581,25 +589,24 @@ locations_and_colocations(pcmk__output_t *out, va_list args)
out->message(out, "locations-list", rsc);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *",
- "pe_working_set_t *", "bool", "bool")
+PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *",
+ "bool", "bool")
static int
locations_and_colocations_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
bool force = va_arg(args, int);
- pcmk__unpack_constraints(data_set);
+ pcmk__unpack_constraints(rsc->cluster);
// Constraints apply to group/clone, not member/instance
if (!force) {
@@ -609,17 +616,18 @@ locations_and_colocations_xml(pcmk__output_t *out, va_list args)
pcmk__output_xml_create_parent(out, "constraints", NULL);
do_locations_list_xml(out, rsc, false);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health(pcmk__output_t *out, va_list args)
{
@@ -634,7 +642,8 @@ health(pcmk__output_t *out, va_list args)
pcmk__s(result, "unknown result"));
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health_text(pcmk__output_t *out, va_list args)
{
@@ -655,7 +664,8 @@ health_text(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health_xml(pcmk__output_t *out, va_list args)
{
@@ -890,7 +900,8 @@ dc_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node(pcmk__output_t *out, va_list args)
{
@@ -908,7 +919,8 @@ crmadmin_node(pcmk__output_t *out, va_list args)
}
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node_text(pcmk__output_t *out, va_list args)
{
@@ -925,7 +937,8 @@ crmadmin_node_text(pcmk__output_t *out, va_list args)
}
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node_xml(pcmk__output_t *out, va_list args)
{
@@ -942,13 +955,13 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *",
+PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *",
"const char *", "guint", "const op_digest_cache_t *")
static int
digests_text(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
- const pe_node_t *node = va_arg(args, const pe_node_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
+ const pcmk_node_t *node = va_arg(args, const pcmk_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *);
@@ -960,7 +973,7 @@ digests_text(pcmk__output_t *out, va_list args)
if (interval_ms != 0) {
action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms,
((task == NULL)? "unknown" : task));
- } else if (pcmk__str_eq(task, "monitor", pcmk__str_none)) {
+ } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
action_desc = strdup("probe action");
} else {
action_desc = crm_strdup_printf("%s action",
@@ -1012,13 +1025,13 @@ add_digest_xml(xmlNode *parent, const char *type, const char *digest,
}
}
-PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *",
+PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *",
"const char *", "guint", "const op_digest_cache_t *")
static int
digests_xml(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
- const pe_node_t *node = va_arg(args, const pe_node_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
+ const pcmk_node_t *node = va_arg(args, const pcmk_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *);
@@ -1028,7 +1041,8 @@ digests_xml(pcmk__output_t *out, va_list args)
xml = pcmk__output_create_xml_node(out, "digests",
"resource", pcmk__s(rsc->id, ""),
- "node", pcmk__s(node->details->uname, ""),
+ "node",
+ pcmk__s(node->details->uname, ""),
"task", pcmk__s(task, ""),
"interval", interval_s,
NULL);
@@ -1045,111 +1059,124 @@ digests_xml(pcmk__output_t *out, va_list args)
}
#define STOP_SANITY_ASSERT(lineno) do { \
- if(current && current->details->unclean) { \
+ if ((current != NULL) && current->details->unclean) { \
/* It will be a pseudo op */ \
- } else if(stop == NULL) { \
+ } else if (stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", \
__func__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
- } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
+ } else if (pcmk_is_set(stop->flags, pcmk_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", \
__func__, lineno, stop->uuid); \
- CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
+ CRM_ASSERT(!pcmk_is_set(stop->flags, pcmk_action_optional));\
} \
- } while(0)
+ } while (0)
-PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *")
+PCMK__OUTPUT_ARGS("rsc-action", "pcmk_resource_t *", "pcmk_node_t *",
+ "pcmk_node_t *")
static int
rsc_action_default(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *current = va_arg(args, pe_node_t *);
- pe_node_t *next = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *current = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *next = va_arg(args, pcmk_node_t *);
GList *possible_matches = NULL;
char *key = NULL;
int rc = pcmk_rc_no_output;
bool moving = false;
- pe_node_t *start_node = NULL;
- pe_action_t *start = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *promote = NULL;
- pe_action_t *demote = NULL;
+ pcmk_node_t *start_node = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *promote = NULL;
+ pcmk_action_t *demote = NULL;
+ pcmk_action_t *reason_op = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| (current == NULL && next == NULL)) {
+ const bool managed = pcmk_is_set(rsc->flags, pcmk_rsc_managed);
+
pe_rsc_info(rsc, "Leave %s\t(%s%s)",
rsc->id, role2text(rsc->role),
- !pcmk_is_set(rsc->flags, pe_rsc_managed)? " unmanaged" : "");
+ (managed? "" : " unmanaged"));
return rc;
}
moving = (current != NULL) && (next != NULL)
- && (current->details != next->details);
+ && !pe__same_node(current, next);
- possible_matches = pe__resource_actions(rsc, next, RSC_START, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_START,
+ false);
if (possible_matches) {
start = possible_matches->data;
g_list_free(possible_matches);
}
- if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
+ if ((start == NULL)
+ || !pcmk_is_set(start->flags, pcmk_action_runnable)) {
start_node = NULL;
} else {
start_node = current;
}
- possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, false);
+ possible_matches = pe__resource_actions(rsc, start_node, PCMK_ACTION_STOP,
+ false);
if (possible_matches) {
stop = possible_matches->data;
g_list_free(possible_matches);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_stop_unexpected)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_unexpected)) {
/* The resource is multiply active with multiple-active set to
* stop_unexpected, and not stopping on its current node, but it should
* be stopping elsewhere.
*/
- possible_matches = pe__resource_actions(rsc, NULL, RSC_STOP, false);
+ possible_matches = pe__resource_actions(rsc, NULL, PCMK_ACTION_STOP,
+ false);
if (possible_matches != NULL) {
stop = possible_matches->data;
g_list_free(possible_matches);
}
}
- possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_PROMOTE,
+ false);
if (possible_matches) {
promote = possible_matches->data;
g_list_free(possible_matches);
}
- possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_DEMOTE,
+ false);
if (possible_matches) {
demote = possible_matches->data;
g_list_free(possible_matches);
}
if (rsc->role == rsc->next_role) {
- pe_action_t *migrate_op = NULL;
+ pcmk_action_t *migrate_op = NULL;
CRM_CHECK(next != NULL, return rc);
- possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, false);
+ possible_matches = pe__resource_actions(rsc, next,
+ PCMK_ACTION_MIGRATE_FROM,
+ false);
if (possible_matches) {
migrate_op = possible_matches->data;
}
if ((migrate_op != NULL) && (current != NULL)
- && pcmk_is_set(migrate_op->flags, pe_action_runnable)) {
+ && pcmk_is_set(migrate_op->flags, pcmk_action_runnable)) {
rc = out->message(out, "rsc-action-item", "Migrate", rsc, current,
next, start, NULL);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) {
rc = out->message(out, "rsc-action-item", "Reload", rsc, current,
next, start, NULL);
- } else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) {
+ } else if ((start == NULL)
+ || pcmk_is_set(start->flags, pcmk_action_optional)) {
if ((demote != NULL) && (promote != NULL)
- && !pcmk_is_set(demote->flags, pe_action_optional)
- && !pcmk_is_set(promote->flags, pe_action_optional)) {
+ && !pcmk_is_set(demote->flags, pcmk_action_optional)
+ && !pcmk_is_set(promote->flags, pcmk_action_optional)) {
rc = out->message(out, "rsc-action-item", "Re-promote", rsc,
current, next, promote, demote);
} else {
@@ -1157,16 +1184,24 @@ rsc_action_default(pcmk__output_t *out, va_list args)
role2text(rsc->role), pe__node_name(next));
}
- } else if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
+ if ((stop == NULL) || (stop->reason == NULL)) {
+ reason_op = start;
+ } else {
+ reason_op = stop;
+ }
rc = out->message(out, "rsc-action-item", "Stop", rsc, current,
- NULL, stop, (stop && stop->reason)? stop : start);
+ NULL, stop, reason_op);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving && current) {
- rc = out->message(out, "rsc-action-item", pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move",
- rsc, current, next, stop, NULL);
+ const bool failed = pcmk_is_set(rsc->flags, pcmk_rsc_failed);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ rc = out->message(out, "rsc-action-item",
+ (failed? "Recover" : "Move"), rsc, current, next,
+ stop, NULL);
+
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
rc = out->message(out, "rsc-action-item", "Recover", rsc, current,
NULL, stop, NULL);
STOP_SANITY_ASSERT(__LINE__);
@@ -1174,36 +1209,46 @@ rsc_action_default(pcmk__output_t *out, va_list args)
} else {
rc = out->message(out, "rsc-action-item", "Restart", rsc, current,
next, start, NULL);
- /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
+#if 0
+ /* @TODO This can be reached in situations that should really be
+ * "Start" (see for example the migrate-fail-7 regression test)
+ */
+ STOP_SANITY_ASSERT(__LINE__);
+#endif
}
g_list_free(possible_matches);
return rc;
}
- if(stop
- && (rsc->next_role == RSC_ROLE_STOPPED
- || (start && !pcmk_is_set(start->flags, pe_action_runnable)))) {
-
- GList *gIter = NULL;
+ if ((stop != NULL)
+ && ((rsc->next_role == pcmk_role_stopped)
+ || ((start != NULL)
+ && !pcmk_is_set(start->flags, pcmk_action_runnable)))) {
key = stop_key(rsc);
- for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- pe_action_t *stop_op = NULL;
+ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = iter->data;
+ pcmk_action_t *stop_op = NULL;
+ reason_op = start;
possible_matches = find_actions(rsc->actions, key, node);
if (possible_matches) {
stop_op = possible_matches->data;
g_list_free(possible_matches);
}
- if (stop_op && (stop_op->flags & pe_action_runnable)) {
- STOP_SANITY_ASSERT(__LINE__);
+ if (stop_op != NULL) {
+ if (pcmk_is_set(stop_op->flags, pcmk_action_runnable)) {
+ STOP_SANITY_ASSERT(__LINE__);
+ }
+ if (stop_op->reason != NULL) {
+ reason_op = stop_op;
+ }
}
if (out->message(out, "rsc-action-item", "Stop", rsc, node, NULL,
- stop_op, (stop_op && stop_op->reason)? stop_op : start) == pcmk_rc_ok) {
+ stop_op, reason_op) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -1211,7 +1256,8 @@ rsc_action_default(pcmk__output_t *out, va_list args)
free(key);
} else if ((stop != NULL)
- && pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) {
+ && pcmk_all_flags_set(rsc->flags,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed)) {
/* 'stop' may be NULL if the failure was ignored */
rc = out->message(out, "rsc-action-item", "Recover", rsc, current,
next, stop, start);
@@ -1222,26 +1268,28 @@ rsc_action_default(pcmk__output_t *out, va_list args)
stop, NULL);
STOP_SANITY_ASSERT(__LINE__);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) {
rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next,
start, NULL);
- } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) {
+ } else if ((stop != NULL)
+ && !pcmk_is_set(stop->flags, pcmk_action_optional)) {
rc = out->message(out, "rsc-action-item", "Restart", rsc, current,
next, start, NULL);
STOP_SANITY_ASSERT(__LINE__);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
CRM_LOG_ASSERT(current != NULL);
rc = out->message(out, "rsc-action-item", "Demote", rsc, current,
next, demote, NULL);
- } else if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->next_role == pcmk_role_promoted) {
CRM_LOG_ASSERT(next);
rc = out->message(out, "rsc-action-item", "Promote", rsc, current,
next, promote, NULL);
- } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
+ } else if ((rsc->role == pcmk_role_stopped)
+ && (rsc->next_role > pcmk_role_stopped)) {
rc = out->message(out, "rsc-action-item", "Start", rsc, current, next,
start, NULL);
}
@@ -1291,12 +1339,12 @@ node_action_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-info", "int", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *",
"const char *", "bool", "bool")
static int
node_info_default(pcmk__output_t *out, va_list args)
{
- int node_id = va_arg(args, int);
+ uint32_t node_id = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
const char *uuid = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
@@ -1304,32 +1352,32 @@ node_info_default(pcmk__output_t *out, va_list args)
bool is_remote = (bool) va_arg(args, int);
return out->info(out,
- "Node %d: %s "
+ "Node %" PRIu32 ": %s "
"(uuid=%s, state=%s, have_quorum=%s, is_remote=%s)",
node_id, pcmk__s(node_name, "unknown"),
pcmk__s(uuid, "unknown"), pcmk__s(state, "unknown"),
pcmk__btoa(have_quorum), pcmk__btoa(is_remote));
}
-PCMK__OUTPUT_ARGS("node-info", "int", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *",
"const char *", "bool", "bool")
static int
node_info_xml(pcmk__output_t *out, va_list args)
{
- int node_id = va_arg(args, int);
+ uint32_t node_id = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
const char *uuid = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
bool have_quorum = (bool) va_arg(args, int);
bool is_remote = (bool) va_arg(args, int);
- char *id_s = crm_strdup_printf("%d", node_id);
+ char *id_s = crm_strdup_printf("%" PRIu32, node_id);
pcmk__output_create_xml_node(out, "node-info",
"nodeid", id_s,
XML_ATTR_UNAME, node_name,
XML_ATTR_ID, uuid,
- XML_NODE_IS_PEER, state,
+ PCMK__XA_CRMD, state,
XML_ATTR_HAVE_QUORUM, pcmk__btoa(have_quorum),
XML_NODE_IS_REMOTE, pcmk__btoa(is_remote),
NULL);
@@ -1337,7 +1385,8 @@ node_info_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *",
+ "xmlNodePtr")
static int
inject_cluster_action(pcmk__output_t *out, va_list args)
{
@@ -1349,8 +1398,9 @@ inject_cluster_action(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- if(rsc) {
- out->list_item(out, NULL, "Cluster action: %s for %s on %s", task, ID(rsc), node);
+ if (rsc != NULL) {
+ out->list_item(out, NULL, "Cluster action: %s for %s on %s",
+ task, ID(rsc), node);
} else {
out->list_item(out, NULL, "Cluster action: %s on %s", task, node);
}
@@ -1358,7 +1408,8 @@ inject_cluster_action(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *",
+ "xmlNodePtr")
static int
inject_cluster_action_xml(pcmk__output_t *out, va_list args)
{
@@ -1638,8 +1689,8 @@ inject_pseudo_action(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- out->list_item(out, NULL, "Pseudo action: %s%s%s", task, node ? " on " : "",
- node ? node : "");
+ out->list_item(out, NULL, "Pseudo action: %s%s%s",
+ task, ((node == NULL)? "" : " on "), pcmk__s(node, ""));
return pcmk_rc_ok;
}
@@ -1728,14 +1779,14 @@ inject_rsc_action_xml(pcmk__output_t *out, va_list args)
retcode = pcmk_rc_ok; \
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
int
pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1750,39 +1801,43 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
int rc = pcmk_rc_no_output;
bool already_printed_failure = false;
- CHECK_RC(rc, out->message(out, "cluster-summary", data_set, pcmkd_state,
+ CHECK_RC(rc, out->message(out, "cluster-summary", scheduler, pcmkd_state,
section_opts, show_opts));
if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) {
- CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
+ CHECK_RC(rc, out->message(out, "node-list", scheduler->nodes, unames,
resources, show_opts, rc == pcmk_rc_ok));
}
/* Print resources section, if needed */
if (pcmk_is_set(section_opts, pcmk_section_resources)) {
- CHECK_RC(rc, out->message(out, "resource-list", data_set, show_opts,
+ CHECK_RC(rc, out->message(out, "resource-list", scheduler, show_opts,
true, unames, resources, rc == pcmk_rc_ok));
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- CHECK_RC(rc, out->message(out, "node-attribute-list", data_set,
- show_opts, rc == pcmk_rc_ok, unames, resources));
+ CHECK_RC(rc, out->message(out, "node-attribute-list", scheduler,
+ show_opts, (rc == pcmk_rc_ok), unames,
+ resources));
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- CHECK_RC(rc, out->message(out, "node-summary", data_set, unames,
- resources, section_opts, show_opts, rc == pcmk_rc_ok));
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ CHECK_RC(rc, out->message(out, "node-summary", scheduler, unames,
+ resources, section_opts, show_opts,
+ (rc == pcmk_rc_ok)));
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- CHECK_RC(rc, out->message(out, "failed-action-list", data_set, unames,
+ CHECK_RC(rc, out->message(out, "failed-action-list", scheduler, unames,
resources, show_opts, rc == pcmk_rc_ok));
}
@@ -1790,9 +1845,11 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
if (pcmk_is_set(section_opts, pcmk_section_fence_failed) &&
fence_history != pcmk__fence_history_none) {
if (history_rc == 0) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_eq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "failed-fencing-list",
stonith_history, unames, section_opts,
@@ -1811,12 +1868,13 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- CHECK_RC(rc, out->message(out, "ticket-list", data_set, rc == pcmk_rc_ok));
+ CHECK_RC(rc, out->message(out, "ticket-list", scheduler,
+ (rc == pcmk_rc_ok)));
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- CHECK_RC(rc, out->message(out, "ban-list", data_set, prefix, resources,
+ CHECK_RC(rc, out->message(out, "ban-list", scheduler, prefix, resources,
show_opts, rc == pcmk_rc_ok));
}
@@ -1832,17 +1890,22 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
out->end_list(out);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_neq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "fencing-list", hp, unames,
section_opts, show_opts,
rc == pcmk_rc_ok));
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_pending,
+ NULL);
if (hp) {
CHECK_RC(rc, out->message(out, "pending-fencing-list", hp,
unames, section_opts, show_opts,
@@ -1854,14 +1917,14 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
static int
cluster_status_xml(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1873,12 +1936,12 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
GList *unames = va_arg(args, GList *);
GList *resources = va_arg(args, GList *);
- out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts,
+ out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts,
show_opts);
/*** NODES ***/
if (pcmk_is_set(section_opts, pcmk_section_nodes)) {
- out->message(out, "node-list", data_set->nodes, unames, resources,
+ out->message(out, "node-list", scheduler->nodes, unames, resources,
show_opts, false);
}
@@ -1887,29 +1950,31 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
/* XML output always displays full details. */
uint32_t full_show_opts = show_opts & ~pcmk_show_brief;
- out->message(out, "resource-list", data_set, full_show_opts,
+ out->message(out, "resource-list", scheduler, full_show_opts,
false, unames, resources, false);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- out->message(out, "node-attribute-list", data_set, show_opts, false,
+ out->message(out, "node-attribute-list", scheduler, show_opts, false,
unames, resources);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- out->message(out, "node-summary", data_set, unames,
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ out->message(out, "node-summary", scheduler, unames,
resources, section_opts, show_opts, false);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- out->message(out, "failed-action-list", data_set, unames, resources,
+ out->message(out, "failed-action-list", scheduler, unames, resources,
show_opts, false);
}
@@ -1922,26 +1987,26 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- out->message(out, "ticket-list", data_set, false);
+ out->message(out, "ticket-list", scheduler, false);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- out->message(out, "ban-list", data_set, prefix, resources, show_opts,
+ out->message(out, "ban-list", scheduler, prefix, resources, show_opts,
false);
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
static int
cluster_status_html(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1954,40 +2019,42 @@ cluster_status_html(pcmk__output_t *out, va_list args)
GList *resources = va_arg(args, GList *);
bool already_printed_failure = false;
- out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts,
+ out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts,
show_opts);
/*** NODE LIST ***/
if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) {
- out->message(out, "node-list", data_set->nodes, unames, resources,
+ out->message(out, "node-list", scheduler->nodes, unames, resources,
show_opts, false);
}
/* Print resources section, if needed */
if (pcmk_is_set(section_opts, pcmk_section_resources)) {
- out->message(out, "resource-list", data_set, show_opts, true, unames,
+ out->message(out, "resource-list", scheduler, show_opts, true, unames,
resources, false);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- out->message(out, "node-attribute-list", data_set, show_opts, false,
+ out->message(out, "node-attribute-list", scheduler, show_opts, false,
unames, resources);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- out->message(out, "node-summary", data_set, unames,
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ out->message(out, "node-summary", scheduler, unames,
resources, section_opts, show_opts, false);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- out->message(out, "failed-action-list", data_set, unames, resources,
+ out->message(out, "failed-action-list", scheduler, unames, resources,
show_opts, false);
}
@@ -1995,12 +2062,14 @@ cluster_status_html(pcmk__output_t *out, va_list args)
if (pcmk_is_set(section_opts, pcmk_section_fence_failed) &&
fence_history != pcmk__fence_history_none) {
if (history_rc == 0) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_eq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
- out->message(out, "failed-fencing-list", stonith_history, unames,
- section_opts, show_opts, false);
+ out->message(out, "failed-fencing-list", stonith_history,
+ unames, section_opts, show_opts, false);
}
} else {
out->begin_list(out, NULL, NULL, "Failed Fencing Actions");
@@ -2021,16 +2090,21 @@ cluster_status_html(pcmk__output_t *out, va_list args)
out->end_list(out);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_neq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "fencing-list", hp, unames, section_opts,
show_opts, false);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_pending,
+ NULL);
if (hp) {
out->message(out, "pending-fencing-list", hp, unames,
section_opts, show_opts, false);
@@ -2040,12 +2114,12 @@ cluster_status_html(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- out->message(out, "ticket-list", data_set, false);
+ out->message(out, "ticket-list", scheduler, false);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- out->message(out, "ban-list", data_set, prefix, resources, show_opts,
+ out->message(out, "ban-list", scheduler, prefix, resources, show_opts,
false);
}
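The hunks above replace the removed xml_has_children() helper with an explicit NULL-safe test on scheduler->failed before printing the failed-action list. A minimal standalone sketch of that test against plain libxml2 follows; the helper name is hypothetical and not part of this patch.

#include <stdbool.h>
#include <stdio.h>
#include <libxml/tree.h>

/* Hypothetical helper (not in the patch): true only when the node exists
 * and has at least one child, mirroring the check added above. */
static bool
has_failed_actions(const xmlNode *failed)
{
    return (failed != NULL) && (failed->children != NULL);
}

int
main(void)
{
    xmlNode *failed = xmlNewNode(NULL, (const xmlChar *) "failed");

    printf("empty: %d\n", has_failed_actions(failed));        /* prints 0 */
    xmlNewChild(failed, NULL, (const xmlChar *) "op", NULL);
    printf("with child: %d\n", has_failed_actions(failed));   /* prints 1 */
    xmlFreeNode(failed);
    return 0;
}

Spelling out both conditions avoids dereferencing a NULL node when no failed actions have been recorded yet.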
diff --git a/lib/pacemaker/pcmk_resource.c b/lib/pacemaker/pcmk_resource.c
index ee4c904..7a17838 100644
--- a/lib/pacemaker/pcmk_resource.c
+++ b/lib/pacemaker/pcmk_resource.c
@@ -28,8 +28,7 @@
"/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"
static xmlNode *
-best_op(const pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+best_op(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
char *xpath = NULL;
xmlNode *history = NULL;
@@ -41,7 +40,7 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
// Find node's resource history
xpath = crm_strdup_printf(XPATH_OP_HISTORY, node->details->uname, rsc->id);
- history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
+ history = get_xpath_object(xpath, rsc->cluster->input, LOG_NEVER);
free(xpath);
// Examine each history entry
@@ -58,9 +57,10 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
crm_element_value_ms(lrm_rsc_op, XML_LRM_ATTR_INTERVAL, &interval_ms);
effective_op = interval_ms == 0
- && pcmk__strcase_any_of(task, RSC_STATUS,
- RSC_START, RSC_PROMOTE,
- RSC_MIGRATED, NULL);
+ && pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_MIGRATE_FROM, NULL);
if (best == NULL) {
goto is_best;
@@ -71,7 +71,7 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
if (!effective_op) {
continue;
}
- // Do not use an ineffective non-recurring op if there's a recurring one.
+ // Do not use an ineffective non-recurring op if there's a recurring one
} else if (best_interval != 0
&& !effective_op
&& interval_ms == 0) {
@@ -115,8 +115,8 @@ is_best:
* \return Standard Pacemaker return code
*/
int
-pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides)
+pcmk__resource_digests(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides)
{
const char *task = NULL;
xmlNode *xml_op = NULL;
@@ -127,13 +127,13 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
if ((out == NULL) || (rsc == NULL) || (node == NULL)) {
return EINVAL;
}
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
// Only primitives get operation digests
return EOPNOTSUPP;
}
// Find XML of operation history to use
- xml_op = best_op(rsc, node, rsc->cluster);
+ xml_op = best_op(rsc, node);
// Generate an operation key
if (xml_op != NULL) {
@@ -141,7 +141,7 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
}
if (task == NULL) { // Assume start if no history is available
- task = RSC_START;
+ task = PCMK_ACTION_START;
interval_ms = 0;
}
@@ -155,9 +155,9 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
}
int
-pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides,
- pe_working_set_t *data_set)
+pcmk_resource_digests(xmlNodePtr *xml, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides,
+ pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
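best_op() above loses its working-set argument because the resource already carries a back-pointer to its scheduler data (rsc->cluster). A hypothetical sketch of that refactor pattern, with invented names rather than Pacemaker types:

#include <stdio.h>

/* Invented types: an object carries a back-pointer to the context that owns
 * it, so helpers no longer need a separate context parameter. */
struct context  { const char *input; };
struct resource { const char *id; struct context *cluster; };

static const char *
lookup(const struct resource *rsc)
{
    /* Formerly lookup(rsc, ctx); the context now comes from the object */
    return rsc->cluster->input;
}

int
main(void)
{
    struct context ctx = { "<cib/>" };
    struct resource rsc = { "dummy", &ctx };

    printf("%s reads %s\n", rsc.id, lookup(&rsc));
    return 0;
}

Dropping the redundant parameter removes one way for callers to pass a context that disagrees with the one the resource actually belongs to.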
diff --git a/lib/pacemaker/pcmk_rule.c b/lib/pacemaker/pcmk_rule.c
index b8ca453..99c0b23 100644
--- a/lib/pacemaker/pcmk_rule.c
+++ b/lib/pacemaker/pcmk_rule.c
@@ -13,6 +13,7 @@
#include <crm/common/cib.h>
#include <crm/common/iso8601.h>
#include <crm/msg_xml.h>
+#include <crm/pengine/internal.h>
#include <crm/pengine/rules_internal.h>
#include <pacemaker-internal.h>
@@ -30,7 +31,7 @@ eval_date_expression(const xmlNode *expr, crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -42,51 +43,51 @@ eval_date_expression(const xmlNode *expr, crm_time_t *now)
/*!
* \internal
- * \brief Initialize the cluster working set for checking rules
+ * \brief Initialize scheduler data for checking rules
*
* Make our own copies of the CIB XML and date/time object, if they're not
* \c NULL. This way we don't have to take ownership of the objects passed via
* the API.
*
- * \param[in,out] out Output object
- * \param[in] input The CIB XML to check (if \c NULL, use current CIB)
- * \param[in] date Check whether the rule is in effect at this date
- * and time (if \c NULL, use current date and time)
- * \param[out] data_set Where to store the cluster working set
+ * \param[in,out] out Output object
+ * \param[in] input The CIB XML to check (if \c NULL, use current CIB)
+ * \param[in] date Check whether the rule is in effect at this date
+ * and time (if \c NULL, use current date and time)
+ * \param[out] scheduler Where to store initialized scheduler data
*
* \return Standard Pacemaker return code
*/
static int
init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
- pe_working_set_t **data_set)
+ pcmk_scheduler_t **scheduler)
{
- // Allows for cleaner syntax than dereferencing the data_set argument
- pe_working_set_t *new_data_set = NULL;
+ // Allows for cleaner syntax than dereferencing the scheduler argument
+ pcmk_scheduler_t *new_scheduler = NULL;
- new_data_set = pe_new_working_set();
- if (new_data_set == NULL) {
+ new_scheduler = pe_new_working_set();
+ if (new_scheduler == NULL) {
return ENOMEM;
}
- pe__set_working_set_flags(new_data_set,
- pe_flag_no_counts|pe_flag_no_compat);
+ pe__set_working_set_flags(new_scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
- // Populate the working set instance
+ // Populate the scheduler data
// Make our own copy of the given input or fetch the CIB and use that
if (input != NULL) {
- new_data_set->input = copy_xml(input);
- if (new_data_set->input == NULL) {
+ new_scheduler->input = copy_xml(input);
+ if (new_scheduler->input == NULL) {
out->err(out, "Failed to copy input XML");
- pe_free_working_set(new_data_set);
+ pe_free_working_set(new_scheduler);
return ENOMEM;
}
} else {
- int rc = cib__signon_query(out, NULL, &(new_data_set->input));
+ int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
if (rc != pcmk_rc_ok) {
- pe_free_working_set(new_data_set);
+ pe_free_working_set(new_scheduler);
return rc;
}
}
@@ -95,12 +96,12 @@ init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
// cluster_status() populates with the current time
if (date != NULL) {
// pcmk_copy_time() guarantees non-NULL
- new_data_set->now = pcmk_copy_time(date);
+ new_scheduler->now = pcmk_copy_time(date);
}
// Unpack everything
- cluster_status(new_data_set);
- *data_set = new_data_set;
+ cluster_status(new_scheduler);
+ *scheduler = new_scheduler;
return pcmk_rc_ok;
}
@@ -111,14 +112,14 @@ init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
* \internal
* \brief Check whether a given rule is in effect
*
- * \param[in] data_set Cluster working set
- * \param[in] rule_id The ID of the rule to check
- * \param[out] error Where to store a rule evaluation error message
+ * \param[in] scheduler Scheduler data
+ * \param[in] rule_id The ID of the rule to check
+ * \param[out] error Where to store a rule evaluation error message
*
* \return Standard Pacemaker return code
*/
static int
-eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
+eval_rule(pcmk_scheduler_t *scheduler, const char *rule_id, const char **error)
{
xmlNodePtr cib_constraints = NULL;
xmlNodePtr match = NULL;
@@ -130,7 +131,7 @@ eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
*error = NULL;
/* Rules are under the constraints node in the XML, so first find that. */
- cib_constraints = pcmk_find_cib_element(data_set->input,
+ cib_constraints = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_CONSTRAINTS);
/* Get all rules matching the given ID that are also simple enough for us
@@ -215,7 +216,7 @@ eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
CRM_ASSERT(match != NULL);
CRM_ASSERT(find_expression_type(match) == time_expr);
- rc = eval_date_expression(match, data_set->now);
+ rc = eval_date_expression(match, scheduler->now);
if (rc == pcmk_rc_undetermined) {
/* pe__eval_date_expr() should return this only if something is
* malformed or missing
@@ -244,7 +245,7 @@ int
pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
const char **rule_ids)
{
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
int rc = pcmk_rc_ok;
CRM_ASSERT(out != NULL);
@@ -254,14 +255,14 @@ pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
return pcmk_rc_ok;
}
- rc = init_rule_check(out, input, date, &data_set);
+ rc = init_rule_check(out, input, date, &scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
for (const char **rule_id = rule_ids; *rule_id != NULL; rule_id++) {
const char *error = NULL;
- int last_rc = eval_rule(data_set, *rule_id, &error);
+ int last_rc = eval_rule(scheduler, *rule_id, &error);
out->message(out, "rule-check", *rule_id, last_rc, error);
@@ -270,7 +271,7 @@ pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
}
}
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
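init_rule_check() above copies the caller's CIB XML (or fetches a fresh copy) precisely so the rule checker never takes ownership of API-supplied objects. A small sketch of that defensive-copy lifecycle under invented names; it is not the Pacemaker implementation:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented state object: it keeps a private copy of the caller's input so
 * the caller retains ownership of the original, as documented above. */
struct check_state { char *input; };

static int
state_init(struct check_state *st, const char *input)
{
    st->input = strdup(input);              /* private copy */
    return (st->input == NULL)? ENOMEM : 0;
}

static void
state_free(struct check_state *st)
{
    free(st->input);
    st->input = NULL;
}

int
main(void)
{
    struct check_state st = { NULL };

    if (state_init(&st, "<cib/>") == 0) {
        printf("checking rules against %s\n", st.input);
        state_free(&st);
    }
    return 0;
}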
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 06d7f00..76b5584 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -14,6 +14,7 @@
#include <glib.h>
#include <crm/lrmd_internal.h>
+#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
@@ -27,11 +28,11 @@
*
* \return Action flags that should be used for orderings
*/
-static enum pe_action_flags
-action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
+static uint32_t
+action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
- enum pe_action_flags flags;
+ uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
@@ -50,7 +51,7 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
- runnable = pcmk_is_set(flags, pe_action_runnable);
+ runnable = pcmk_is_set(flags, pcmk_action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->cmds->action_flags(action, node);
@@ -63,9 +64,8 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
- if (runnable && !pcmk_is_set(flags, pe_action_runnable)) {
- pe__set_raw_action_flags(flags, action->rsc->id,
- pe_action_runnable);
+ if (runnable && !pcmk_is_set(flags, pcmk_action_runnable)) {
+ pe__set_raw_action_flags(flags, action->rsc->id, pcmk_action_runnable);
}
return flags;
}
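action_flags_for_ordering() above re-queries a clone action's flags for a specific node but restores the runnable bit if the node-independent query had it set. A toy sketch of that preserve-across-recheck idea, using made-up flag values:

#include <inttypes.h>
#include <stdio.h>

#define RUNNABLE (UINT32_C(1) << 2)    /* invented flag value */

/* If the general (node-independent) query said "runnable", keep that bit
 * even when the node-specific recheck clears it, as the function above does
 * for clone instance actions. */
static uint32_t
recheck(uint32_t general, uint32_t specific)
{
    if (((general & RUNNABLE) != 0) && ((specific & RUNNABLE) == 0)) {
        specific |= RUNNABLE;
    }
    return specific;
}

int
main(void)
{
    printf("0x%" PRIx32 "\n", recheck(RUNNABLE, 0));    /* prints 0x4 */
    return 0;
}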
@@ -89,18 +89,19 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
* \note It is the caller's responsibility to free the return value.
*/
static char *
-action_uuid_for_ordering(const char *first_uuid, const pe_resource_t *first_rsc)
+action_uuid_for_ordering(const char *first_uuid,
+ const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
- enum action_tasks first_task = no_action;
- enum action_tasks remapped_task = no_action;
+ enum action_tasks first_task = pcmk_action_unspecified;
+ enum action_tasks remapped_task = pcmk_action_unspecified;
// Only non-notify actions for collective resources need remapping
- if ((strstr(first_uuid, "notify") != NULL)
- || (first_rsc->variant < pe_group)) {
+ if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
+ || (first_rsc->variant < pcmk_rsc_variant_group)) {
goto done;
}
@@ -112,39 +113,35 @@ action_uuid_for_ordering(const char *first_uuid, const pe_resource_t *first_rsc)
first_task = text2task(first_task_str);
switch (first_task) {
- case stop_rsc:
- case start_rsc:
- case action_notify:
- case action_promote:
- case action_demote:
+ case pcmk_action_stop:
+ case pcmk_action_start:
+ case pcmk_action_notify:
+ case pcmk_action_promote:
+ case pcmk_action_demote:
remapped_task = first_task + 1;
break;
- case stopped_rsc:
- case started_rsc:
- case action_notified:
- case action_promoted:
- case action_demoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_notified:
+ case pcmk_action_promoted:
+ case pcmk_action_demoted:
remapped_task = first_task;
break;
- case monitor_rsc:
- case shutdown_crm:
- case stonith_node:
+ case pcmk_action_monitor:
+ case pcmk_action_shutdown:
+ case pcmk_action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
- if (remapped_task != no_action) {
- /* If a (clone) resource has notifications enabled, we want to order
- * relative to when all notifications have been sent for the remapped
- * task. Only outermost resources or those in bundles have
- * notifications.
+ if (remapped_task != pcmk_action_unspecified) {
+ /* If a clone or bundle has notifications enabled, the ordering will be
+ * relative to when notifications have been sent for the remapped task.
*/
- if (pcmk_is_set(first_rsc->flags, pe_rsc_notify)
- && ((first_rsc->parent == NULL)
- || (pe_rsc_is_clone(first_rsc)
- && (first_rsc->parent->variant == pe_container)))) {
+ if (pcmk_is_set(first_rsc->flags, pcmk_rsc_notify)
+ && (pe_rsc_is_clone(first_rsc) || pe_rsc_is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
task2text(remapped_task));
} else {
@@ -181,13 +178,14 @@ done:
*
* \return Actual action that should be used for the ordering
*/
-static pe_action_t *
-action_for_ordering(pe_action_t *action)
+static pcmk_action_t *
+action_for_ordering(pcmk_action_t *action)
{
- pe_action_t *result = action;
- pe_resource_t *rsc = action->rsc;
+ pcmk_action_t *result = action;
+ pcmk_resource_t *rsc = action->rsc;
- if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) {
+ if ((rsc != NULL) && (rsc->variant >= pcmk_rsc_variant_group)
+ && (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->actions, uuid, NULL, NULL);
@@ -203,6 +201,34 @@ action_for_ordering(pe_action_t *action)
/*!
* \internal
+ * \brief Wrapper for update_ordered_actions() method for readability
+ *
+ * \param[in,out] rsc Resource to call method for
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this
+ * node (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates
+ * (may include pcmk_action_optional to affect only
+ * mandatory actions, and pe_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Group of enum pcmk__updated flags indicating what was updated
+ */
+static inline uint32_t
+update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
+ pcmk_scheduler_t *scheduler)
+{
+ return rsc->cmds->update_ordered_actions(first, then, node, flags, filter,
+ type, scheduler);
+}
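The new update() helper above exists purely so the flag-handling code that follows can call rsc->cmds->update_ordered_actions() without repeating the long expression at every site. A hypothetical, self-contained sketch of the same inline-wrapper-around-a-method-pointer pattern:

#include <stdio.h>

struct thing;

/* Invented "methods" table, standing in for the per-variant callbacks that
 * resources carry in rsc->cmds. */
struct methods {
    unsigned (*combine)(const struct thing *t, unsigned a, unsigned b);
};

struct thing {
    const struct methods *cmds;
};

static unsigned
combine_impl(const struct thing *t, unsigned a, unsigned b)
{
    (void) t;
    return a | b;
}

static const struct methods vtable = { combine_impl };

/* The wrapper: call sites say combine(t, a, b) instead of spelling out
 * t->cmds->combine(t, a, b) on every line. */
static inline unsigned
combine(const struct thing *t, unsigned a, unsigned b)
{
    return t->cmds->combine(t, a, b);
}

int
main(void)
{
    struct thing t = { &vtable };

    printf("0x%x\n", combine(&t, 0x1, 0x4));   /* prints 0x5 */
    return 0;
}

The wrapper adds no behavior of its own, which is why the hunks below can swap it in mechanically for each ordering flag.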
+
+/*!
+ * \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
@@ -210,16 +236,15 @@ action_for_ordering(pe_action_t *action)
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
-update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
- enum pe_action_flags first_flags,
- enum pe_action_flags then_flags,
- pe_action_wrapper_t *order,
- pe_working_set_t *data_set)
+update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
+ uint32_t first_flags, uint32_t then_flags,
+ pcmk__related_action_t *order,
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
@@ -228,96 +253,90 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
- pe_node_t *node = then->node;
+ pcmk_node_t *node = then->node;
- if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
- * pe_order_implies_then.
+ * pcmk__ar_first_implies_then.
*/
- pe__clear_order_flags(order->type, pe_order_implies_then_on_node);
- pe__set_order_flags(order->type, pe_order_implies_then);
+ pe__clear_order_flags(order->type,
+ pcmk__ar_first_implies_same_node_then);
+ pe__set_order_flags(order->type, pcmk__ar_first_implies_then);
node = first->node;
pe_rsc_trace(then->rsc,
- "%s then %s: mapped pe_order_implies_then_on_node to "
- "pe_order_implies_then on %s",
+ "%s then %s: mapped pcmk__ar_first_implies_same_node_then "
+ "to pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pe__node_name(node));
}
- if (pcmk_is_set(order->type, pe_order_implies_then)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags & pe_action_optional,
- pe_action_optional,
- pe_order_implies_then,
- data_set);
- } else if (!pcmk_is_set(first_flags, pe_action_optional)
- && pcmk_is_set(then->flags, pe_action_optional)) {
- pe__clear_action_flags(then, pe_action_optional);
+ changed |= update(then->rsc, first, then, node,
+ first_flags & pcmk_action_optional,
+ pcmk_action_optional, pcmk__ar_first_implies_then,
+ scheduler);
+ } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
+ && pcmk_is_set(then->flags, pcmk_action_optional)) {
+ pe__clear_action_flags(then, pcmk_action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) {
- enum pe_action_flags restart = pe_action_optional|pe_action_runnable;
+ if (pcmk_is_set(order->type, pcmk__ar_intermediate_stop)
+ && (then->rsc != NULL)) {
+ enum pe_action_flags restart = pcmk_action_optional
+ |pcmk_action_runnable;
- changed |= then->rsc->cmds->update_ordered_actions(first, then, node,
- first_flags, restart,
- pe_order_restart,
- data_set);
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart",
+ changed |= update(then->rsc, first, then, node, first_flags, restart,
+ pcmk__ar_intermediate_stop, scheduler);
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_implies_first)) {
+ if (pcmk_is_set(order->type, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
- changed |= first->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_implies_first,
- data_set);
- } else if (!pcmk_is_set(first_flags, pe_action_optional)
- && pcmk_is_set(first->flags, pe_action_runnable)) {
- pe__clear_action_flags(first, pe_action_runnable);
+ changed |= update(first->rsc, first, then, node, first_flags,
+ pcmk_action_optional, pcmk__ar_then_implies_first,
+ scheduler);
+ } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
+ && pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ pe__clear_action_flags(first, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) {
+ if (pcmk_is_set(order->type, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags & pe_action_optional,
- pe_action_optional,
- pe_order_promoted_implies_first,
- data_set);
+ changed |= update(then->rsc, first, then, node,
+ first_flags & pcmk_action_optional,
+ pcmk_action_optional,
+ pcmk__ar_promoted_then_implies_first, scheduler);
}
pe_rsc_trace(then->rsc,
- "%s then %s: %s after pe_order_promoted_implies_first",
+ "%s then %s: %s after pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_one_or_more)) {
+ if (pcmk_is_set(order->type, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_one_or_more,
- data_set);
-
- } else if (pcmk_is_set(first_flags, pe_action_runnable)) {
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_min_runnable,
+ scheduler);
+
+ } else if (pcmk_is_set(first_flags, pcmk_action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
@@ -325,145 +344,131 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
- && !pcmk_is_set(then->flags, pe_action_runnable)) {
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
- pe__set_action_flags(then, pe_action_runnable);
+ pe__set_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) {
- if (!pcmk_is_set(first_flags, pe_action_runnable)
- && (first->rsc->running_on != NULL)) {
+ if (pcmk_is_set(order->type, pcmk__ar_nested_remote_probe)
+ && (then->rsc != NULL)) {
+
+ if (!pcmk_is_set(first_flags, pcmk_action_runnable)
+ && (first->rsc != NULL) && (first->rsc->running_on != NULL)) {
pe_rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
- order->type = pe_order_none;
+ order->type = (enum pe_ordering) pcmk__ar_none;
} else {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_runnable_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable,
+ pcmk__ar_unrunnable_first_blocks, scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_runnable_left)) {
+ if (pcmk_is_set(order->type, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_runnable_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable,
+ pcmk__ar_unrunnable_first_blocks, scheduler);
- } else if (!pcmk_is_set(first_flags, pe_action_runnable)
- && pcmk_is_set(then->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(first_flags, pcmk_action_runnable)
+ && pcmk_is_set(then->flags, pcmk_action_runnable)) {
- pe__clear_action_flags(then, pe_action_runnable);
+ pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) {
+ if (pcmk_is_set(order->type, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_implies_first_migratable,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_optional,
+ pcmk__ar_unmigratable_then_blocks, scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after "
- "pe_order_implies_first_migratable",
+ "pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_pseudo_left)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_pseudo_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_optional, pcmk__ar_first_else_then,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_optional)) {
+ if (pcmk_is_set(order->type, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_optional,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_ordered,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_asymmetrical)) {
+ if (pcmk_is_set(order->type, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_asymmetrical,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_asymmetric,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(first->flags, pe_action_runnable)
- && pcmk_is_set(order->type, pe_order_implies_then_printed)
- && !pcmk_is_set(first_flags, pe_action_optional)) {
+ if (pcmk_is_set(first->flags, pcmk_action_runnable)
+ && pcmk_is_set(order->type, pcmk__ar_first_implies_then_graphed)
+ && !pcmk_is_set(first_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
- pe__set_action_flags(then, pe_action_print_always);
+ pe__set_action_flags(then, pcmk_action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
- if (pcmk_is_set(order->type, pe_order_implies_first_printed)
- && !pcmk_is_set(then_flags, pe_action_optional)) {
+ if (pcmk_is_set(order->type, pcmk__ar_then_implies_first_graphed)
+ && !pcmk_is_set(then_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
- pe__set_action_flags(first, pe_action_print_always);
+ pe__set_action_flags(first, pcmk_action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
- if (pcmk_any_flags_set(order->type, pe_order_implies_then
- |pe_order_implies_first
- |pe_order_restart)
+ if (pcmk_any_flags_set(order->type, pcmk__ar_first_implies_then
+ |pcmk__ar_then_implies_first
+ |pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
- && !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
- && pcmk_is_set(first->rsc->flags, pe_rsc_block)
- && !pcmk_is_set(first->flags, pe_action_runnable)
- && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)) {
+ && !pcmk_is_set(first->rsc->flags, pcmk_rsc_managed)
+ && pcmk_is_set(first->rsc->flags, pcmk_rsc_blocked)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)
+ && pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
- if (pcmk_is_set(then->flags, pe_action_runnable)) {
- pe__clear_action_flags(then, pe_action_runnable);
+ if (pcmk_is_set(then->flags, pcmk_action_runnable)) {
+ pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first "
@@ -478,13 +483,13 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
// Convenience macros for logging action properties
#define action_type_str(flags) \
- (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
+ (pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
- (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
+ (pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
- (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
+ (pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
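The convenience macros above turn action flag bits into fixed strings for trace messages. A compilable toy version of the same idea, with invented flag values that are not Pacemaker's:

#include <inttypes.h>
#include <stdio.h>

/* Invented flag values; Pacemaker's real ones differ. */
#define FLAG_PSEUDO   (UINT32_C(1) << 0)
#define FLAG_RUNNABLE (UINT32_C(1) << 1)

#define is_set(flags, f)    (((flags) & (f)) == (f))
#define type_str(flags)     (is_set((flags), FLAG_PSEUDO)? "pseudo-action" : "action")
#define runnable_str(flags) (is_set((flags), FLAG_RUNNABLE)? "runnable" : "unrunnable")

int
main(void)
{
    uint32_t flags = FLAG_RUNNABLE;

    printf("%s is %s\n", type_str(flags), runnable_str(flags));
    return 0;
}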
@@ -493,11 +498,12 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
- * \param[in,out] then Action to update
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] then Action to update
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
+pcmk__update_action_for_orderings(pcmk_action_t *then,
+ pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
@@ -508,7 +514,7 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
- if (pcmk_is_set(then->flags, pe_action_requires_any)) {
+ if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
@@ -523,22 +529,23 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
then->required_runnable_before = 1;
}
- /* The pe_order_one_or_more clause of update_action_for_ordering_flags()
- * (called below) will reset runnable if appropriate.
+ /* The pcmk__ar_min_runnable clause of
+ * update_action_for_ordering_flags() (called below)
+ * will reset runnable if appropriate.
*/
- pe__clear_action_flags(then, pe_action_runnable);
+ pe__clear_action_flags(then, pcmk_action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
- pe_action_t *first = other->action;
+ pcmk__related_action_t *other = lpc->data;
+ pcmk_action_t *first = other->action;
- pe_node_t *then_node = then->node;
- pe_node_t *first_node = first->node;
+ pcmk_node_t *then_node = then->node;
+ pcmk_node_t *first_node = first->node;
if ((first->rsc != NULL)
- && (first->rsc->variant == pe_group)
- && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
+ && (first->rsc->variant == pcmk_rsc_variant_group)
+ && pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
if (first_node != NULL) {
@@ -548,8 +555,8 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
}
if ((then->rsc != NULL)
- && (then->rsc->variant == pe_group)
- && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
+ && (then->rsc->variant == pcmk_rsc_variant_group)
+ && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
if (then_node != NULL) {
@@ -559,30 +566,31 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
}
// Disable constraint if it only applies when on same node, but isn't
- if (pcmk_is_set(other->type, pe_order_same_node)
+ if (pcmk_is_set(other->type, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
- && (first_node->details != then_node->details)) {
+ && !pe__same_node(first_node, then_node)) {
pe_rsc_trace(then->rsc,
- "Disabled ordering %s on %s then %s on %s: not same node",
+ "Disabled ordering %s on %s then %s on %s: "
+ "not same node",
other->action->uuid, pe__node_name(first_node),
then->uuid, pe__node_name(then_node));
- other->type = pe_order_none;
+ other->type = (enum pe_ordering) pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
- && pcmk_is_set(other->type, pe_order_then_cancels_first)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ && pcmk_is_set(other->type, pcmk__ar_then_cancels_first)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
- pe__set_action_flags(other->action, pe_action_optional);
- if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) {
- pe__clear_resource_flags(first->rsc, pe_rsc_reload);
+ pe__set_action_flags(other->action, pcmk_action_optional);
+ if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
+ pe__clear_resource_flags(first->rsc, pcmk_rsc_reload);
}
}
@@ -605,14 +613,14 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
- enum pe_action_flags first_flags, then_flags;
+ uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
- other, data_set);
+ other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
@@ -626,7 +634,7 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
"Disabled ordering %s then %s in favor of %s then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
- other->type = pe_order_none;
+ other->type = (enum pe_ordering) pcmk__ar_none;
}
@@ -635,15 +643,15 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
+ pcmk__related_action_t *other = lpc2->data;
- pcmk__update_action_for_orderings(other->action, data_set);
+ pcmk__update_action_for_orderings(other->action, scheduler);
}
- pcmk__update_action_for_orderings(first, data_set);
+ pcmk__update_action_for_orderings(first, scheduler);
}
}
- if (pcmk_is_set(then->flags, pe_action_requires_any)) {
+ if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
@@ -654,23 +662,24 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
- if (pcmk_is_set(last_flags, pe_action_runnable)
- && !pcmk_is_set(then->flags, pe_action_runnable)) {
- pcmk__block_colocation_dependents(then, data_set);
+ if (pcmk_is_set(last_flags, pcmk_action_runnable)
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
+ pcmk__block_colocation_dependents(then);
}
- pcmk__update_action_for_orderings(then, data_set);
+ pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
+ pcmk__related_action_t *other = lpc->data;
- pcmk__update_action_for_orderings(other->action, data_set);
+ pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
static inline bool
-is_primitive_action(const pe_action_t *action)
+is_primitive_action(const pcmk_action_t *action)
{
- return action && action->rsc && (action->rsc->variant == pe_native);
+ return (action != NULL) && (action->rsc != NULL)
+ && (action->rsc->variant == pcmk_rsc_variant_primitive);
}
/*!
@@ -686,8 +695,7 @@ is_primitive_action(const pe_action_t *action)
pe__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
- pe_action_set_reason((action), reason_text, \
- ((flag) == pe_action_migrate_runnable)); \
+ pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
@@ -704,27 +712,28 @@ is_primitive_action(const pe_action_t *action)
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
-handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
+handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
- if ((then->rsc == NULL) || pcmk_is_set(first->flags, pe_action_runnable)) {
+ if ((then->rsc == NULL)
+ || pcmk_is_set(first->flags, pcmk_action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
- if (pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(then->flags, pcmk_action_optional)) {
enum rsc_role_e then_rsc_role = then->rsc->fns->state(then->rsc, TRUE);
- if ((then_rsc_role == RSC_ROLE_STOPPED)
- && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_none)) {
+ if ((then_rsc_role == pcmk_role_stopped)
+ && pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
- } else if ((then_rsc_role >= RSC_ROLE_STARTED)
- && pcmk__str_eq(then->task, RSC_START, pcmk__str_none)
+ } else if ((then_rsc_role >= pcmk_role_started)
+ && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly if 'then' should start after 'first' but is already
* started on a single node.
@@ -734,8 +743,8 @@ handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
}
// 'First' can't run, so 'then' can't either
- clear_action_flag_because(then, pe_action_optional, first);
- clear_action_flag_because(then, pe_action_runnable, first);
+ clear_action_flag_because(then, pcmk_action_optional, first);
+ clear_action_flag_because(then, pcmk_action_runnable, first);
}
/*!
@@ -750,7 +759,8 @@ handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
* "stop later group member before stopping earlier group member"
*/
static void
-handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
+handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
+ uint32_t filter)
{
const char *reason = NULL;
@@ -760,17 +770,17 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
// We need to update the action in two cases:
// ... if 'then' is required
- if (pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable action on same resource (if a resource
* should restart but can't start, we still want to stop)
*/
- if (pcmk_is_set(filter, pe_action_runnable)
- && !pcmk_is_set(then->flags, pe_action_runnable)
- && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
+ if (pcmk_is_set(filter, pcmk_action_runnable)
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)
+ && pcmk_is_set(then->rsc->flags, pcmk_rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
@@ -783,24 +793,24 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
- if (pcmk_is_set(first->flags, pe_action_runnable)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' required if 'then' is required
- if (!pcmk_is_set(then->flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
- if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
- if (!pcmk_is_set(first->flags, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_runnable)) {
- clear_action_flag_because(then, pe_action_runnable, first);
+ if (!pcmk_is_set(first->flags, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ clear_action_flag_because(then, pcmk_action_runnable, first);
}
}
@@ -812,104 +822,107 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
* (and runnable_before members if appropriate) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (ignored)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (ignored)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
- uint32_t then_flags = then->flags;
- uint32_t first_flags = first->flags;
+ uint32_t then_flags = 0U;
+ uint32_t first_flags = 0U;
+
+ CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
- if (pcmk_is_set(type, pe_order_asymmetrical)) {
+ then_flags = then->flags;
+ first_flags = first->flags;
+ if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
- if (pcmk_is_set(type, pe_order_implies_first)
- && !pcmk_is_set(then_flags, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_then_implies_first)
+ && !pcmk_is_set(then_flags, pcmk_action_optional)) {
// Then is required, and implies first should be, too
- if (pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(flags, pe_action_optional)
- && pcmk_is_set(first_flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(flags, pcmk_action_optional)
+ && pcmk_is_set(first_flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
- if (pcmk_is_set(flags, pe_action_migrate_runnable)
- && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable, then);
+ if (pcmk_is_set(flags, pcmk_action_migratable)
+ && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
- if (pcmk_is_set(type, pe_order_promoted_implies_first)
- && (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)
- && pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
+ && (then->rsc != NULL) && (then->rsc->role == pcmk_role_promoted)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ clear_action_flag_because(first, pcmk_action_optional, then);
- if (pcmk_is_set(first->flags, pe_action_migrate_runnable)
- && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable,
- then);
+ if (pcmk_is_set(first->flags, pcmk_action_migratable)
+ && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
- if (pcmk_is_set(type, pe_order_implies_first_migratable)
- && pcmk_is_set(filter, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
+ && pcmk_is_set(filter, pcmk_action_optional)) {
- if (!pcmk_all_flags_set(then->flags,
- pe_action_migrate_runnable|pe_action_runnable)) {
- clear_action_flag_because(first, pe_action_runnable, then);
+ if (!pcmk_all_flags_set(then->flags, pcmk_action_migratable
+ |pcmk_action_runnable)) {
+ clear_action_flag_because(first, pcmk_action_runnable, then);
}
- if (!pcmk_is_set(then->flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
}
- if (pcmk_is_set(type, pe_order_pseudo_left)
- && pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_first_else_then)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
- clear_action_flag_because(then, pe_action_migrate_runnable, first);
- pe__clear_action_flags(then, pe_action_pseudo);
+ clear_action_flag_because(then, pcmk_action_migratable, first);
+ pe__clear_action_flags(then, pcmk_action_pseudo);
}
- if (pcmk_is_set(type, pe_order_runnable_left)
- && pcmk_is_set(filter, pe_action_runnable)
- && pcmk_is_set(then->flags, pe_action_runnable)
- && !pcmk_is_set(flags, pe_action_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
+ && pcmk_is_set(filter, pcmk_action_runnable)
+ && pcmk_is_set(then->flags, pcmk_action_runnable)
+ && !pcmk_is_set(flags, pcmk_action_runnable)) {
- clear_action_flag_because(then, pe_action_runnable, first);
- clear_action_flag_because(then, pe_action_migrate_runnable, first);
+ clear_action_flag_because(then, pcmk_action_runnable, first);
+ clear_action_flag_because(then, pcmk_action_migratable, first);
}
- if (pcmk_is_set(type, pe_order_implies_then)
- && pcmk_is_set(filter, pe_action_optional)
- && pcmk_is_set(then->flags, pe_action_optional)
- && !pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_first_implies_then)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && pcmk_is_set(then->flags, pcmk_action_optional)
+ && !pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_migratable)) {
- clear_action_flag_because(then, pe_action_optional, first);
+ clear_action_flag_because(then, pcmk_action_optional, first);
}
- if (pcmk_is_set(type, pe_order_restart)) {
+ if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
@@ -923,7 +936,7 @@ pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
- pcmk__update_action_for_orderings(then, data_set);
+ pcmk__update_action_for_orderings(then, scheduler);
}
}
@@ -948,7 +961,8 @@ pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
* \param[in] details If true, recursively log dependent actions
*/
void
-pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
+pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
+ bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
@@ -956,7 +970,7 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
CRM_CHECK(action != NULL, return);
- if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->details->uname;
node_uuid = action->node->details->id;
@@ -966,16 +980,14 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
}
switch (text2task(action->task)) {
- case stonith_node:
- case shutdown_crm:
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ case pcmk_action_fence:
+ case pcmk_action_shutdown:
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
- } else if (pcmk_is_set(action->flags, pe_action_processed)) {
- desc = "";
} else {
desc = "(Provisional) ";
}
@@ -988,14 +1000,12 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
(node_uuid? ")" : ""));
break;
default:
- if (pcmk_is_set(action->flags, pe_action_optional)) {
+ if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
- } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
- } else if (pcmk_is_set(action->flags, pe_action_processed)) {
- desc = "";
} else {
desc = "(Provisional) ";
}
@@ -1012,16 +1022,16 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
if (details) {
const GList *iter = NULL;
- const pe_action_wrapper_t *other = NULL;
+ const pcmk__related_action_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
- other = (const pe_action_wrapper_t *) iter->data;
+ other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
- other = (const pe_action_wrapper_t *) iter->data;
+ other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
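
[Editor's note] The logging loop walks plain GLib lists whose elements are small wrapper structs (now pcmk__related_action_t). The commit also drops several explicit casts of iter->data nearby; in C a void pointer converts to any object pointer implicitly, so the cast adds nothing. A compile-and-run sketch against GLib only, with a made-up wrapper type:

    #include <glib.h>
    #include <stdio.h>

    struct related {            /* stands in for pcmk__related_action_t */
        const char *name;       /* stands in for other->action */
    };

    int
    main(void)
    {
        struct related stop  = { "rsc1_stop_0" };
        struct related start = { "rsc1_start_0" };
        GList *before = NULL;

        before = g_list_prepend(before, &start);
        before = g_list_prepend(before, &stop);

        for (GList *iter = before; iter != NULL; iter = iter->next) {
            const struct related *other = iter->data;   /* no cast needed in C */

            printf("preceding action: %s\n", other->name);
        }
        g_list_free(before);
        return 0;
    }
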
@@ -1041,19 +1051,19 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
*
* \return Newly created shutdown action for \p node
*/
-pe_action_t *
-pcmk__new_shutdown_action(pe_node_t *node)
+pcmk_action_t *
+pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
- pe_action_t *shutdown_op = NULL;
+ pcmk_action_t *shutdown_op = NULL;
CRM_ASSERT(node != NULL);
- shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN,
+ shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->details->uname);
- shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE,
- TRUE, node->details->data_set);
+ shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
+ node, FALSE, node->details->data_set);
pcmk__order_stops_before_shutdown(node, shutdown_op);
add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
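
[Editor's note] The shutdown action's ID is just a formatted "<task>-<node>" string. GLib's g_strdup_printf() behaves like Pacemaker's crm_strdup_printf(); the task string below is hard-coded for illustration and is not taken from the PCMK_ACTION_DO_SHUTDOWN definition:

    #include <glib.h>
    #include <stdio.h>

    int
    main(void)
    {
        const char *node_name = "node1";
        char *shutdown_id = g_strdup_printf("%s-%s", "do_shutdown", node_name);

        printf("%s\n", shutdown_id);    /* prints: do_shutdown-node1 */
        g_free(shutdown_id);
        return 0;
    }
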
@@ -1136,17 +1146,17 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*/
- if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
+ if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
- task = CRMD_ACTION_START;
+ task = PCMK_ACTION_START;
} else {
- task = CRMD_ACTION_STATUS;
+ task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
- if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
@@ -1166,8 +1176,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
/* Migration history is preserved separately, which usually matters for
* multiple nodes and is important for future cluster transitions.
*/
- } else if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
+ } else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
@@ -1212,8 +1222,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
- crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? "" : exit_reason);
- crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */
+ crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, pcmk__s(exit_reason, ""));
+ crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); // For context during triage
crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
@@ -1241,7 +1251,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
}
}
- if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
+ if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
/*
* Record migrate_source and migrate_target always for migrate ops.
*/
@@ -1287,12 +1298,11 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
* otherwise false
*/
bool
-pcmk__action_locks_rsc_to_node(const pe_action_t *action)
+pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
// Only resource actions taking place on resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
- || (action->rsc->lock_node == NULL) || (action->node == NULL)
- || (action->node->details != action->rsc->lock_node->details)) {
+ || !pe__same_node(action->node, action->rsc->lock_node)) {
return false;
}
@@ -1300,7 +1310,7 @@ pcmk__action_locks_rsc_to_node(const pe_action_t *action)
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
- && (strcmp(action->task, RSC_STOP) != 0)) {
+ && (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
@@ -1311,8 +1321,8 @@ pcmk__action_locks_rsc_to_node(const pe_action_t *action)
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
- const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
- const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
+ const pcmk__related_action_t *action_wrapper2 = a;
+ const pcmk__related_action_t *action_wrapper1 = b;
if (a == NULL) {
return 1;
@@ -1336,16 +1346,16 @@ sort_action_id(gconstpointer a, gconstpointer b)
* \param[in,out] action Action whose inputs should be checked
*/
void
-pcmk__deduplicate_action_inputs(pe_action_t *action)
+pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
- pe_action_wrapper_t *last_input = NULL;
+ pcmk__related_action_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
- pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
+ pcmk__related_action_t *input = item->data;
next = item->next;
if ((last_input != NULL)
@@ -1377,31 +1387,34 @@ pcmk__deduplicate_action_inputs(pe_action_t *action)
* \internal
* \brief Output all scheduled actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__output_actions(pe_working_set_t *data_set)
+pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
// Output node (non-resource) actions
- for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
- pe_action_t *action = (pe_action_t *) iter->data;
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue; // This action was not scheduled
}
- if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
+ pcmk__str_none)) {
task = strdup("Shutdown");
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
- const char *op = g_hash_table_lookup(action->meta, "stonith_action");
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
+ const char *op = g_hash_table_lookup(action->meta,
+ "stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
@@ -1410,9 +1423,11 @@ pcmk__output_actions(pe_working_set_t *data_set)
}
if (pe__is_guest_node(action->node)) {
+ const pcmk_resource_t *remote = action->node->details->remote_rsc;
+
node_name = crm_strdup_printf("%s (resource: %s)",
pe__node_name(action->node),
- action->node->details->remote_rsc->container->id);
+ remote->container->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pe__node_name(action->node));
}
@@ -1424,8 +1439,8 @@ pcmk__output_actions(pe_working_set_t *data_set)
}
// Output resource actions
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->output_actions(rsc);
}
@@ -1433,26 +1448,6 @@ pcmk__output_actions(pe_working_set_t *data_set)
/*!
* \internal
- * \brief Check whether action from resource history is still in configuration
- *
- * \param[in] rsc Resource that action is for
- * \param[in] task Action's name
- * \param[in] interval_ms Action's interval (in milliseconds)
- *
- * \return true if action is still in resource configuration, otherwise false
- */
-static bool
-action_in_config(const pe_resource_t *rsc, const char *task, guint interval_ms)
-{
- char *key = pcmk__op_key(rsc->id, task, interval_ms);
- bool config = (find_rsc_op_entry(rsc, key) != NULL);
-
- free(key);
- return config;
-}
-
-/*!
- * \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
@@ -1467,8 +1462,9 @@ task_for_digest(const char *task, guint interval_ms)
* the resource.
*/
if ((interval_ms == 0)
- && pcmk__str_any_of(task, RSC_STATUS, RSC_MIGRATED, RSC_PROMOTE, NULL)) {
- task = RSC_START;
+ && pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_PROMOTE, NULL)) {
+ task = PCMK_ACTION_START;
}
return task;
}
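
[Editor's note] task_for_digest() folds several zero-interval actions (monitor, migrate_from, promote) into "start" because only the start digest is recorded for them. A hedged sketch of the same mapping with plain strcmp(), using literal task names that are assumed rather than taken from the PCMK_ACTION_* definitions:

    #include <stdio.h>
    #include <string.h>

    static const char *
    task_for_digest_demo(const char *task, unsigned int interval_ms)
    {
        if ((interval_ms == 0)
            && ((strcmp(task, "monitor") == 0)
                || (strcmp(task, "migrate_from") == 0)
                || (strcmp(task, "promote") == 0))) {
            return "start";     /* compare against the stored start digest */
        }
        return task;
    }

    int
    main(void)
    {
        printf("%s\n", task_for_digest_demo("monitor", 0));      /* start */
        printf("%s\n", task_for_digest_demo("monitor", 10000));  /* monitor */
        return 0;
    }
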
@@ -1486,25 +1482,25 @@ task_for_digest(const char *task, guint interval_ms)
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const op_digest_cache_t *digest_data,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
- if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
- return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL)
+ return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
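
[Editor's note] only_sanitized_changed() comes down to a string comparison of two digests: the secure (private-parameter-filtered) digest recorded in the history entry versus the one recalculated from the current configuration. A self-contained illustration of that comparison using GLib's checksum API; the inputs here are invented strings, whereas the real code hashes a canonicalized XML parameter set:

    #include <glib.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* Pretend these are the non-private resource parameters, serialized */
        const char *recorded_params = "ip=192.168.1.10 monitor_interval=10s";
        const char *current_params  = "ip=192.168.1.10 monitor_interval=10s";

        char *recorded_digest = g_compute_checksum_for_string(G_CHECKSUM_MD5,
                                                              recorded_params, -1);
        char *current_digest  = g_compute_checksum_for_string(G_CHECKSUM_MD5,
                                                              current_params, -1);

        /* The scheduler's check is effectively this strcmp() on hex digests */
        if (strcmp(recorded_digest, current_digest) == 0) {
            printf("only sanitized parameters (if any) changed\n");
        } else {
            printf("configuration changed; restart or reload needed\n");
        }
        g_free(recorded_digest);
        g_free(current_digest);
        return 0;
    }
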
@@ -1519,12 +1515,12 @@ only_sanitized_changed(const xmlNode *xml_op,
* \param[in,out] node Node where resource should be restarted
*/
static void
-force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
- pe_node_t *node)
+force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
+ pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
- pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
- rsc->cluster);
+ pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
+ rsc->cluster);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
@@ -1535,28 +1531,30 @@ force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
* \internal
* \brief Schedule a reload of a resource on a node
*
- * \param[in,out] rsc Resource to reload
- * \param[in] node Where resource should be reloaded
+ * \param[in,out] data Resource to reload
+ * \param[in] user_data Where resource should be reloaded
*/
static void
-schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
+schedule_reload(gpointer data, gpointer user_data)
{
- pe_action_t *reload = NULL;
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+ pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
- if (rsc->variant > pe_native) {
- g_list_foreach(rsc->children, (GFunc) schedule_reload, (gpointer) node);
+ if (rsc->variant > pcmk_rsc_variant_primitive) {
+ g_list_foreach(rsc->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
- || !pcmk_is_set(rsc->flags, pe_rsc_managed)
- || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_managed)
+ || pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged",
- pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " unmanaged",
+ pcmk_is_set(rsc->flags, pcmk_rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->details->uname);
return;
}
@@ -1564,26 +1562,26 @@ schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
rsc->id);
- custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
+ custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->cluster);
return;
}
// Schedule the reload
- pe__set_resource_flags(rsc, pe_rsc_reload);
- reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
- FALSE, TRUE, rsc->cluster);
+ pe__set_resource_flags(rsc, pcmk_rsc_reload);
+ reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
+ FALSE, rsc->cluster);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
- pe_order_optional|pe_order_then_cancels_first,
+ pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
- pe_order_optional|pe_order_then_cancels_first,
+ pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
}
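
[Editor's note] The notable part of the schedule_reload() change is its signature: it now matches GLib's GFunc type, void (*)(gpointer data, gpointer user_data), so the recursive call can hand it to g_list_foreach() directly instead of through a (GFunc) cast of an incompatible function type. A small self-contained example of the same pattern, with invented types:

    #include <glib.h>
    #include <stdio.h>

    struct demo_rsc {
        const char *id;
        GList *children;        /* list of struct demo_rsc * */
    };

    /* Matches GFunc exactly, so no cast is needed below */
    static void
    schedule_reload_demo(gpointer data, gpointer user_data)
    {
        struct demo_rsc *rsc = data;
        const char *node = user_data;

        if (rsc->children != NULL) {    /* collective resource: recurse */
            g_list_foreach(rsc->children, schedule_reload_demo, user_data);
            return;
        }
        printf("reload %s on %s\n", rsc->id, node);
    }

    int
    main(void)
    {
        char node_name[] = "node1";
        struct demo_rsc child1 = { "grp:0", NULL };
        struct demo_rsc child2 = { "grp:1", NULL };
        struct demo_rsc group  = { "grp", NULL };

        group.children = g_list_append(group.children, &child1);
        group.children = g_list_append(group.children, &child2);

        schedule_reload_demo(&group, node_name);
        g_list_free(group.children);
        return 0;
    }
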
@@ -1602,7 +1600,7 @@ schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
* \return true if action configuration changed, otherwise false
*/
bool
-pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
+pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
@@ -1619,14 +1617,15 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
- if (action_in_config(rsc, task, interval_ms)) {
+ if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node));
} else if (pcmk_is_set(rsc->cluster->flags,
- pe_flag_stop_action_orphans)) {
+ pcmk_sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
- crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ crm_element_value(xml_op,
+ XML_LRM_ATTR_CALLID),
task, interval_ms, node, "orphan");
return true;
} else {
@@ -1658,13 +1657,13 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
}
switch (digest_data->rc) {
- case RSC_DIGEST_RESTART:
+ case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
- case RSC_DIGEST_ALL:
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
+ case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
@@ -1682,12 +1681,12 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
"Device parameters changed (reload)", NULL,
rsc->cluster);
crm_log_xml_debug(digest_data->params_all, "params:reload");
- schedule_reload(rsc, node);
+ schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pe_rsc_trace(rsc,
- "Restarting %s because agent doesn't support reload",
- rsc->id);
+ "Restarting %s "
+ "because agent doesn't support reload", rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
@@ -1737,15 +1736,15 @@ rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
* \param[in,out] node Node whose history is being processed
*/
static void
-process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
- pe_node_t *node)
+process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
+ pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pe_rsc_is_anon_clone(pe__const_top_resource(rsc, false))) {
pe_rsc_trace(rsc,
"Skipping configuration check "
@@ -1797,33 +1796,36 @@ process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((interval_ms > 0)
- && (pcmk_is_set(rsc->flags, pe_rsc_maintenance)
+ && (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
- crm_element_value(rsc_op, XML_LRM_ATTR_CALLID),
+ crm_element_value(rsc_op,
+ XML_LRM_ATTR_CALLID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
- || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
- RSC_PROMOTE, RSC_MIGRATED, NULL)) {
+ || pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* If a resource operation failed, and the operation's definition
* has changed, clear any fail count so they can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc)) {
- /* We haven't allocated resources to nodes yet, so if the
+ /* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
- pe__add_param_check(rsc_op, rsc, node, pe_check_active,
+ pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
rsc->cluster);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
- && (pe_get_failcount(node, rsc, NULL, pe_fc_effective,
+ && (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->cluster);
@@ -1847,21 +1849,21 @@ process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
* \param[in] lrm_rscs Node's <lrm_resources> from CIB status XML
*/
static void
-process_node_history(pe_node_t *node, const xmlNode *lrm_rscs)
+process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pe__node_name(node));
for (const xmlNode *rsc_entry = first_named_child(lrm_rscs,
XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
- if (xml_has_children(rsc_entry)) {
+ if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(ID(rsc_entry),
node->details->data_set);
for (GList *iter = result; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
process_rsc_history(rsc_entry, rsc, node);
}
}
@@ -1885,10 +1887,10 @@ process_node_history(pe_node_t *node, const xmlNode *lrm_rscs)
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
+pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
@@ -1896,8 +1898,8 @@ pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
@@ -1910,7 +1912,7 @@ pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
- history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
+ history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
free(xpath);
process_node_history(node, history);
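
[Editor's note] pcmk__handle_rsc_config_changes() locates each node's history section by formatting an XPath expression and evaluating it against the status XML. A hedged sketch of the same lookup using libxml2 directly; the XML snippet and XPath below are simplified stand-ins for the real CIB layout and the XPATH_NODE_HISTORY macro:

    #include <stdio.h>
    #include <string.h>
    #include <libxml/parser.h>
    #include <libxml/xpath.h>

    int
    main(void)
    {
        const char *xml =
            "<status>"
            "<node_state uname='node1'><lrm><lrm_resources/></lrm></node_state>"
            "<node_state uname='node2'><lrm><lrm_resources/></lrm></node_state>"
            "</status>";
        xmlDocPtr doc = xmlReadMemory(xml, strlen(xml), NULL, NULL, 0);
        xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

        /* Analogous to building the expression with crm_strdup_printf() */
        const char *xpath = "//node_state[@uname='node1']/lrm/lrm_resources";
        xmlXPathObjectPtr result = xmlXPathEvalExpression(BAD_CAST xpath, ctx);

        if ((result != NULL) && (result->nodesetval != NULL)
            && (result->nodesetval->nodeNr > 0)) {
            printf("found history for node1\n");
        } else {
            printf("no history for node1\n");
        }
        xmlXPathFreeObject(result);
        xmlXPathFreeContext(ctx);
        xmlFreeDoc(doc);
        return 0;
    }
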
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 5682744..1c66314 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -16,402 +16,496 @@
#include "libpacemaker_private.h"
-#define PE__VARIANT_BUNDLE 1
-#include <lib/pengine/variant.h>
+struct assign_data {
+ const pcmk_node_t *prefer;
+ bool stop_if_fail;
+};
+/*!
+ * \internal
+ * \brief Assign a single bundle replica's resources (other than container)
+ *
+ * \param[in,out] replica Replica to assign
+ * \param[in] user_data Preferred node, if any
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
static bool
-is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
+assign_replica(pe__bundle_replica_t *replica, void *user_data)
{
- for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ pcmk_node_t *container_host = NULL;
+
+ struct assign_data *assign_data = user_data;
+ const pcmk_node_t *prefer = assign_data->prefer;
+ bool stop_if_fail = assign_data->stop_if_fail;
+
+ const pcmk_resource_t *bundle = pe__const_top_resource(replica->container,
+ true);
+
+ if (replica->ip != NULL) {
+ pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
+ bundle->id, replica->ip->id);
+ replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail);
+ }
+
+ container_host = replica->container->allocated_to;
+ if (replica->remote != NULL) {
+ if (pe__is_guest_or_remote_node(container_host)) {
+ /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
+ * the same host because Pacemaker Remote only supports a single
+ * active connection.
+ */
+ pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
+ INFINITY, replica->remote,
+ container_host->details->remote_rsc, NULL,
+ NULL, pcmk__coloc_influence);
+ }
+ pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
+ bundle->id, replica->remote->id);
+ replica->remote->cmds->assign(replica->remote, prefer, stop_if_fail);
+ }
+
+ if (replica->child != NULL) {
+ pcmk_node_t *node = NULL;
+ GHashTableIter iter;
- if (node->details == replica->node->details) {
- return TRUE;
+ g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
+ if (!pe__same_node(node, replica->node)) {
+ node->weight = -INFINITY;
+ } else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
+ node->weight = INFINITY;
+ }
}
+
+ pe__set_resource_flags(replica->child->parent, pcmk_rsc_assigning);
+ pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
+ bundle->id, replica->child->id);
+ replica->child->cmds->assign(replica->child, replica->node,
+ stop_if_fail);
+ pe__clear_resource_flags(replica->child->parent, pcmk_rsc_assigning);
}
- return FALSE;
+ return true;
}
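
[Editor's note] assign_replica() pins the replica's child to the replica's own node by walking the child's allowed_nodes hash table and banning every other node with a -INFINITY score. A standalone sketch of that hash-iteration pattern with GLib, mutating small stand-in node structs in place the way the scheduler mutates node->weight; the infinity value is an assumption for illustration:

    #include <glib.h>
    #include <stdio.h>

    #define DEMO_INFINITY 1000000   /* assumed stand-in for the score bound */

    struct demo_node {
        const char *name;
        int score;
    };

    int
    main(void)
    {
        struct demo_node n1 = { "node1", 0 }, n2 = { "node2", 0 }, n3 = { "node3", 0 };
        GHashTable *allowed = g_hash_table_new(g_str_hash, g_str_equal);
        GHashTableIter iter;
        gpointer value;
        const char *replica_node = "node2";

        g_hash_table_insert(allowed, (gpointer) n1.name, &n1);
        g_hash_table_insert(allowed, (gpointer) n2.name, &n2);
        g_hash_table_insert(allowed, (gpointer) n3.name, &n3);

        /* Ban every node except the one hosting this replica's container */
        g_hash_table_iter_init(&iter, allowed);
        while (g_hash_table_iter_next(&iter, NULL, &value)) {
            struct demo_node *node = value;

            node->score = g_str_equal(node->name, replica_node)
                          ? DEMO_INFINITY : -DEMO_INFINITY;
        }

        g_hash_table_iter_init(&iter, allowed);
        while (g_hash_table_iter_next(&iter, NULL, &value)) {
            struct demo_node *node = value;

            printf("%s: %d\n", node->name, node->score);
        }
        g_hash_table_destroy(allowed);
        return 0;
    }
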
/*!
* \internal
* \brief Assign a bundle resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
+ * can't be assigned to a node, set the
+ * descendant's next role to stopped and update
+ * existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__bundle_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *containers = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundled_resource = NULL;
+ struct assign_data assign_data = { prefer, stop_if_fail };
- CRM_CHECK(rsc != NULL, return NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- get_bundle_variant_data(bundle_data, rsc);
+ pe_rsc_trace(rsc, "Assigning bundle %s", rsc->id);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
- pe__set_resource_flags(rsc, pe_rsc_allocating);
- containers = pe__bundle_containers(rsc);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
-
- containers = g_list_sort(containers, pcmk__cmp_instance);
- pcmk__assign_instances(rsc, containers, bundle_data->nreplicas,
- bundle_data->nreplicas_per_host);
+ // Assign all containers first, so we know what nodes the bundle will be on
+ containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance);
+ pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
+ rsc->fns->max_per_node(rsc));
g_list_free(containers);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
- pe_node_t *container_host = NULL;
-
- CRM_ASSERT(replica);
- if (replica->ip) {
- pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
- rsc->id, replica->ip->id);
- replica->ip->cmds->assign(replica->ip, prefer);
- }
-
- container_host = replica->container->allocated_to;
- if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
- /* We need 'nested' connection resources to be on the same
- * host because pacemaker-remoted only supports a single
- * active connection
- */
- pcmk__new_colocation("child-remote-with-docker-remote", NULL,
- INFINITY, replica->remote,
- container_host->details->remote_rsc, NULL,
- NULL, true, rsc->cluster);
- }
-
- if (replica->remote) {
- pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
- rsc->id, replica->remote->id);
- replica->remote->cmds->assign(replica->remote, prefer);
- }
-
- // Explicitly allocate replicas' children before bundle child
- if (replica->child) {
- pe_node_t *node = NULL;
- GHashTableIter iter;
-
- g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
- if (node->details != replica->node->details) {
- node->weight = -INFINITY;
- } else if (!pcmk__threshold_reached(replica->child, node,
- NULL)) {
- node->weight = INFINITY;
- }
- }
-
- pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
- pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
- rsc->id, replica->child->id);
- replica->child->cmds->assign(replica->child, replica->node);
- pe__clear_resource_flags(replica->child->parent,
- pe_rsc_allocating);
- }
- }
+ // Then assign remaining replica resources
+ pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data);
- if (bundle_data->child) {
- pe_node_t *node = NULL;
+ // Finally, assign the bundled resources to each bundle node
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
- g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
+
+ g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
- if (is_bundle_node(bundle_data, node)) {
+ if (pe__node_is_bundle_instance(rsc, node)) {
node->weight = 0;
} else {
node->weight = -INFINITY;
}
}
- pe_rsc_trace(rsc, "Allocating bundle %s child %s",
- rsc->id, bundle_data->child->id);
- bundle_data->child->cmds->assign(bundle_data->child, prefer);
+ bundled_resource->cmds->assign(bundled_resource, prefer, stop_if_fail);
}
- pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
return NULL;
}
+/*!
+ * \internal
+ * \brief Create actions for a bundle replica's resources (other than child)
+ *
+ * \param[in,out] replica Replica to create actions for
+ * \param[in] user_data Unused
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+create_replica_actions(pe__bundle_replica_t *replica, void *user_data)
+{
+ if (replica->ip != NULL) {
+ replica->ip->cmds->create_actions(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->create_actions(replica->container);
+ }
+ if (replica->remote != NULL) {
+ replica->remote->cmds->create_actions(replica->remote);
+ }
+ return true;
+}
+/*!
+ * \internal
+ * \brief Create all actions needed for a given bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to create actions for
+ */
void
-pcmk__bundle_create_actions(pe_resource_t *rsc)
+pcmk__bundle_create_actions(pcmk_resource_t *rsc)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
GList *containers = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundled_resource = NULL;
- CRM_CHECK(rsc != NULL, return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- containers = pe__bundle_containers(rsc);
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if (replica->ip) {
- replica->ip->cmds->create_actions(replica->ip);
- }
- if (replica->container) {
- replica->container->cmds->create_actions(replica->container);
- }
- if (replica->remote) {
- replica->remote->cmds->create_actions(replica->remote);
- }
- }
+ pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
+ containers = pe__bundle_containers(rsc);
pcmk__create_instance_actions(rsc, containers);
+ g_list_free(containers);
- if (bundle_data->child) {
- bundle_data->child->cmds->create_actions(bundle_data->child);
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ bundled_resource->cmds->create_actions(bundled_resource);
- if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
- /* promote */
- pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
- action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
+ if (pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
+ pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTE, true, true);
+ action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTED,
+ true, true);
action->priority = INFINITY;
- /* demote */
- pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
- action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
+ pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTE, true, true);
+ action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTED,
+ true, true);
action->priority = INFINITY;
}
}
-
- g_list_free(containers);
}
-void
-pcmk__bundle_internal_constraints(pe_resource_t *rsc)
+/*!
+ * \internal
+ * \brief Create internal constraints for a bundle replica's resources
+ *
+ * \param[in,out] replica Replica to create internal constraints for
+ * \param[in,out] user_data Replica's parent bundle
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundle = user_data;
- CRM_CHECK(rsc != NULL, return);
+ replica->container->cmds->internal_constraints(replica->container);
- get_bundle_variant_data(bundle_data, rsc);
+ // Start bundle -> start replica container
+ pcmk__order_starts(bundle, replica->container,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
- if (bundle_data->child) {
- pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
- RSC_START, pe_order_implies_first_printed);
- pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
- RSC_STOP, pe_order_implies_first_printed);
+ // Stop bundle -> stop replica child and container
+ if (replica->child != NULL) {
+ pcmk__order_stops(bundle, replica->child,
+ pcmk__ar_then_implies_first_graphed);
+ }
+ pcmk__order_stops(bundle, replica->container,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Start replica container -> bundle is started
+ pcmk__order_resource_actions(replica->container, PCMK_ACTION_START, bundle,
+ PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Stop replica container -> bundle is stopped
+ pcmk__order_resource_actions(replica->container, PCMK_ACTION_STOP, bundle,
+ PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
+
+ if (replica->ip != NULL) {
+ replica->ip->cmds->internal_constraints(replica->ip);
+
+ // Replica IP address -> replica container (symmetric)
+ pcmk__order_starts(replica->ip, replica->container,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_guest_allowed);
+ pcmk__order_stops(replica->container, replica->ip,
+ pcmk__ar_then_implies_first|pcmk__ar_guest_allowed);
+
+ pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip,
+ replica->container, NULL, NULL,
+ pcmk__coloc_influence);
+ }
- if (bundle_data->child->children) {
- pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
- } else {
- pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
- }
+ if (replica->remote != NULL) {
+ /* This handles ordering and colocating remote relative to container
+ * (via "#resource-with-container"). Since IP is also ordered and
+ * colocated relative to the container, we don't need to do anything
+ * explicit here with IP.
+ */
+ replica->remote->cmds->internal_constraints(replica->remote);
}
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ if (replica->child != NULL) {
+ CRM_ASSERT(replica->remote != NULL);
+ // "Start remote then child" is implicit in scheduler's remote logic
+ }
+ return true;
+}
- CRM_ASSERT(replica);
- CRM_ASSERT(replica->container);
+/*!
+ * \internal
+ * \brief Create implicit constraints needed for a bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to create implicit constraints for
+ */
+void
+pcmk__bundle_internal_constraints(pcmk_resource_t *rsc)
+{
+ pcmk_resource_t *bundled_resource = NULL;
- replica->container->cmds->internal_constraints(replica->container);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- pcmk__order_starts(rsc, replica->container,
- pe_order_runnable_left|pe_order_implies_first_printed);
+ pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc);
- if (replica->child) {
- pcmk__order_stops(rsc, replica->child,
- pe_order_implies_first_printed);
- }
- pcmk__order_stops(rsc, replica->container,
- pe_order_implies_first_printed);
- pcmk__order_resource_actions(replica->container, RSC_START, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
-
- if (replica->ip) {
- replica->ip->cmds->internal_constraints(replica->ip);
-
- // Start IP then container
- pcmk__order_starts(replica->ip, replica->container,
- pe_order_runnable_left|pe_order_preserve);
- pcmk__order_stops(replica->container, replica->ip,
- pe_order_implies_first|pe_order_preserve);
-
- pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
- replica->container, NULL, NULL, true,
- rsc->cluster);
- }
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource == NULL) {
+ return;
+ }
- if (replica->remote) {
- /* This handles ordering and colocating remote relative to container
- * (via "resource-with-container"). Since IP is also ordered and
- * colocated relative to the container, we don't need to do anything
- * explicit here with IP.
- */
- replica->remote->cmds->internal_constraints(replica->remote);
- }
+ // Start bundle -> start bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START, bundled_resource,
+ PCMK_ACTION_START,
+ pcmk__ar_then_implies_first_graphed);
- if (replica->child) {
- CRM_ASSERT(replica->remote);
+ // Bundled clone is started -> bundle is started
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
- // "Start remote then child" is implicit in scheduler's remote logic
- }
+ // Stop bundle -> stop bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, bundled_resource,
+ PCMK_ACTION_STOP,
+ pcmk__ar_then_implies_first_graphed);
- }
+ // Bundled clone is stopped -> bundle is stopped
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
- if (bundle_data->child) {
- bundle_data->child->cmds->internal_constraints(bundle_data->child);
- if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
- pcmk__promotable_restart_ordering(rsc);
+ bundled_resource->cmds->internal_constraints(bundled_resource);
- /* child demoted before global demoted */
- pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
- RSC_DEMOTED,
- pe_order_implies_then_printed);
+ if (!pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
+ return;
+ }
+ pcmk__promotable_restart_ordering(rsc);
+
+ // Demote bundle -> demote bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE, bundled_resource,
+ PCMK_ACTION_DEMOTE,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Bundled clone is demoted -> bundle is demoted
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_DEMOTED,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Promote bundle -> promote bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_PROMOTE,
+ bundled_resource, PCMK_ACTION_PROMOTE,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Bundled clone is promoted -> bundle is promoted
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_PROMOTED,
+ rsc, PCMK_ACTION_PROMOTED,
+ pcmk__ar_first_implies_then_graphed);
+}
- /* global demote before child demote */
- pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
- RSC_DEMOTE,
- pe_order_implies_first_printed);
+struct match_data {
+ const pcmk_node_t *node; // Node to compare against replica
+ pcmk_resource_t *container; // Replica container corresponding to node
+};
- /* child promoted before global promoted */
- pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
- RSC_PROMOTED,
- pe_order_implies_then_printed);
+/*!
+ * \internal
+ * \brief Check whether a replica container is assigned to a given node
+ *
+ * \param[in] replica Replica to check
+ * \param[in,out] user_data struct match_data with node to compare against
+ *
+ * \return true if the replica does not match (to indicate further replicas
+ * should be processed), otherwise false
+ */
+static bool
+match_replica_container(const pe__bundle_replica_t *replica, void *user_data)
+{
+ struct match_data *match_data = user_data;
- /* global promote before child promote */
- pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
- RSC_PROMOTE,
- pe_order_implies_first_printed);
- }
+ if (pcmk__instance_matches(replica->container, match_data->node,
+ pcmk_role_unknown, false)) {
+ match_data->container = replica->container;
+ return false; // Match found, don't bother searching further replicas
}
+ return true; // No match, keep searching
}
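
[Editor's note] match_replica_container() returning false is what stops pe__foreach_const_bundle_replica() early once a match is found; returning true means "keep searching". A minimal sketch of that callback convention with invented types (the real iterator walks the bundle's replica list):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_replica {
        const char *container_node;
    };

    struct demo_match {
        const char *wanted_node;
        const struct demo_replica *found;
    };

    /* Callback convention: return true to continue, false to stop early */
    typedef bool (*demo_replica_fn)(const struct demo_replica *, void *);

    static void
    foreach_replica(const struct demo_replica *replicas, int n,
                    demo_replica_fn fn, void *user_data)
    {
        for (int i = 0; i < n; i++) {
            if (!fn(&replicas[i], user_data)) {
                return;     /* callback asked to stop */
            }
        }
    }

    static bool
    match_node(const struct demo_replica *replica, void *user_data)
    {
        struct demo_match *match = user_data;

        if (strcmp(replica->container_node, match->wanted_node) == 0) {
            match->found = replica;
            return false;   /* match found, stop searching */
        }
        return true;
    }

    int
    main(void)
    {
        struct demo_replica replicas[] = { { "node1" }, { "node2" }, { "node3" } };
        struct demo_match match = { "node2", NULL };

        foreach_replica(replicas, 3, match_node, &match);
        printf("matched replica on %s\n",
               (match.found != NULL)? match.found->container_node : "(none)");
        return 0;
    }
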
-static pe_resource_t *
-compatible_replica_for_node(const pe_resource_t *rsc_lh,
- const pe_node_t *candidate,
- const pe_resource_t *rsc, enum rsc_role_e filter,
- gboolean current)
+/*!
+ * \internal
+ * \brief Get the host to which a bundle node is assigned
+ *
+ * \param[in] node Possible bundle node to check
+ *
+ * \return Node to which the container for \p node is assigned if \p node is a
+ * bundle node, otherwise \p node itself
+ */
+static const pcmk_node_t *
+get_bundle_node_host(const pcmk_node_t *node)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(candidate != NULL, return NULL);
- get_bundle_variant_data(bundle_data, rsc);
+ if (pe__is_bundle_node(node)) {
+ const pcmk_resource_t *container = node->details->remote_rsc->container;
- crm_trace("Looking for compatible child from %s for %s on %s",
- rsc_lh->id, rsc->id, pe__node_name(candidate));
-
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- if (pcmk__instance_matches(replica->container, candidate, filter,
- current)) {
- crm_trace("Pairing %s with %s on %s",
- rsc_lh->id, replica->container->id,
- pe__node_name(candidate));
- return replica->container;
- }
+ return container->fns->location(container, NULL, 0);
}
-
- crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
- return NULL;
+ return node;
}
-static pe_resource_t *
-compatible_replica(const pe_resource_t *rsc_lh, const pe_resource_t *rsc,
- enum rsc_role_e filter, gboolean current,
- pe_working_set_t *data_set)
+/*!
+ * \internal
+ * \brief Find a bundle container compatible with a dependent resource
+ *
+ * \param[in] dependent Dependent resource in colocation with bundle
+ * \param[in] bundle Bundle that \p dependent is colocated with
+ *
+ * \return A container from \p bundle assigned to the same node as \p dependent
+ * if assigned, otherwise assigned to any of dependent's allowed nodes,
+ * otherwise NULL.
+ */
+static pcmk_resource_t *
+compatible_container(const pcmk_resource_t *dependent,
+ const pcmk_resource_t *bundle)
{
GList *scratch = NULL;
- pe_resource_t *pair = NULL;
- pe_node_t *active_node_lh = NULL;
-
- active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
- if (active_node_lh) {
- return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
- current);
+ struct match_data match_data = { NULL, NULL };
+
+ // If dependent is assigned, only check there
+ match_data.node = dependent->fns->location(dependent, NULL, 0);
+ match_data.node = get_bundle_node_host(match_data.node);
+ if (match_data.node != NULL) {
+ pe__foreach_const_bundle_replica(bundle, match_replica_container,
+ &match_data);
+ return match_data.container;
}
- scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
+ // Otherwise, check for any of the dependent's allowed nodes
+ scratch = g_hash_table_get_values(dependent->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL);
+ for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
+ match_data.node = iter->data;
+ match_data.node = get_bundle_node_host(match_data.node);
+ if (match_data.node == NULL) {
+ continue;
+ }
- for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
- if (pair) {
- goto done;
+ pe__foreach_const_bundle_replica(bundle, match_replica_container,
+ &match_data);
+ if (match_data.container != NULL) {
+ break;
}
}
-
- pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
- done:
g_list_free(scratch);
- return pair;
+ return match_data.container;
}
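
[Editor's note] When the dependent resource is not yet assigned, compatible_container() falls back to checking each of the dependent's allowed nodes in sorted order. The GLib half of that (pull the hash table's values into a list, sort it, walk it) looks roughly like the sketch below; the sort key here is just the node name, whereas the real pcmk__sort_nodes() sorts by node score:

    #include <glib.h>
    #include <stdio.h>

    static gint
    compare_names(gconstpointer a, gconstpointer b)
    {
        return g_strcmp0(a, b);     /* real code compares node scores instead */
    }

    int
    main(void)
    {
        GHashTable *allowed = g_hash_table_new(g_str_hash, g_str_equal);
        GList *scratch = NULL;

        g_hash_table_insert(allowed, "node3", "node3");
        g_hash_table_insert(allowed, "node1", "node1");
        g_hash_table_insert(allowed, "node2", "node2");

        scratch = g_list_sort(g_hash_table_get_values(allowed), compare_names);

        for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
            printf("checking candidate %s\n", (const char *) iter->data);
            /* ... look for a container assigned to this node, stop on match ... */
        }
        g_list_free(scratch);
        g_hash_table_destroy(allowed);
        return 0;
    }
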
-int copies_per_node(pe_resource_t * rsc)
+struct coloc_data {
+ const pcmk__colocation_t *colocation;
+ pcmk_resource_t *dependent;
+ GList *container_hosts;
+};
+
+/*!
+ * \internal
+ * \brief Apply a colocation score to replica node scores or resource priority
+ *
+ * \param[in] replica Replica of primary bundle resource in colocation
+ * \param[in,out] user_data struct coloc_data for colocation being applied
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+replica_apply_coloc_score(const pe__bundle_replica_t *replica, void *user_data)
{
- /* Strictly speaking, there should be a 'copies_per_node' addition
- * to the resource function table and each case would be a
- * function. However that would be serious overkill to return an
- * int. In fact, it seems to me that both function tables
- * could/should be replaced by resources.{c,h} full of
- * rsc_{some_operation} functions containing a switch as below
- * which calls out to functions named {variant}_{some_operation}
- * as needed.
- */
- switch(rsc->variant) {
- case pe_unknown:
- return 0;
- case pe_native:
- case pe_group:
- return 1;
- case pe_clone:
- {
- const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
-
- if (max_clones_node == NULL) {
- return 1;
-
- } else {
- int max_i;
-
- pcmk__scan_min_int(max_clones_node, &max_i, 0);
- return max_i;
- }
- }
- case pe_container:
- {
- pe__bundle_variant_data_t *data = NULL;
- get_bundle_variant_data(data, rsc);
- return data->nreplicas_per_host;
- }
+ struct coloc_data *coloc_data = user_data;
+ pcmk_node_t *chosen = NULL;
+
+ if (coloc_data->colocation->score < INFINITY) {
+ replica->container->cmds->apply_coloc_score(coloc_data->dependent,
+ replica->container,
+ coloc_data->colocation,
+ false);
+ return true;
+ }
+
+ chosen = replica->container->fns->location(replica->container, NULL, 0);
+ if ((chosen == NULL)
+ || is_set_recursive(replica->container, pcmk_rsc_blocked, true)) {
+ return true;
+ }
+
+ if ((coloc_data->colocation->primary_role >= pcmk_role_promoted)
+ && ((replica->child == NULL)
+ || (replica->child->next_role < pcmk_role_promoted))) {
+ return true;
}
- return 0;
+
+ pe_rsc_trace(pe__const_top_resource(replica->container, true),
+ "Allowing mandatory colocation %s using %s @%d",
+ coloc_data->colocation->id, pe__node_name(chosen),
+ chosen->weight);
+ coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
+ chosen);
+ return true;
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -420,151 +514,193 @@ int copies_per_node(pe_resource_t * rsc)
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
- GList *allocated_primaries = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ struct coloc_data coloc_data = { colocation, dependent, NULL };
/* This should never be called for the bundle itself as a dependent.
- * Instead, we add its colocation constraints to its replicas and call the
- * apply_coloc_score() for the replicas as dependents.
+ * Instead, we add its colocation constraints to its containers and bundled
+ * primitive and call the apply_coloc_score() method for them as dependents.
*/
- CRM_ASSERT(!for_dependent);
-
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
- CRM_ASSERT(dependent->variant == pe_native);
-
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
+ CRM_ASSERT((primary != NULL)
+ && (primary->variant == pcmk_rsc_variant_bundle)
+ && (dependent != NULL)
+ && (dependent->variant == pcmk_rsc_variant_primitive)
+ && (colocation != NULL) && !for_dependent);
+
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ pe_rsc_trace(primary,
+ "Skipping applying colocation %s "
+ "because %s is still provisional",
+ colocation->id, primary->id);
return;
+ }
+ pe_rsc_trace(primary, "Applying colocation %s (%s with %s at %s)",
+ colocation->id, dependent->id, primary->id,
+ pcmk_readable_score(colocation->score));
- } else if (colocation->dependent->variant > pe_group) {
- pe_resource_t *primary_replica = compatible_replica(dependent, primary,
- RSC_ROLE_UNKNOWN,
- FALSE,
- dependent->cluster);
+ /* If the constraint dependent is a clone or bundle, "dependent" here is one
+ * of its instances. Look for a compatible instance of this bundle.
+ */
+ if (colocation->dependent->variant > pcmk_rsc_variant_group) {
+ const pcmk_resource_t *primary_container = NULL;
- if (primary_replica) {
+ primary_container = compatible_container(dependent, primary);
+ if (primary_container != NULL) { // Success, we found one
pe_rsc_debug(primary, "Pairing %s with %s",
- dependent->id, primary_replica->id);
- dependent->cmds->apply_coloc_score(dependent, primary_replica,
+ dependent->id, primary_container->id);
+ dependent->cmds->apply_coloc_score(dependent, primary_container,
colocation, true);
- } else if (colocation->score >= INFINITY) {
- crm_notice("Cannot pair %s with instance of %s",
+ } else if (colocation->score >= INFINITY) { // Failure, and it's fatal
+ crm_notice("%s cannot run because there is no compatible "
+ "instance of %s to colocate with",
dependent->id, primary->id);
- pcmk__assign_resource(dependent, NULL, true);
+ pcmk__assign_resource(dependent, NULL, true, true);
- } else {
- pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
+ } else { // Failure, but we can ignore it
+ pe_rsc_debug(primary,
+ "%s cannot be colocated with any instance of %s",
dependent->id, primary->id);
}
-
return;
}
- get_bundle_variant_data(bundle_data, primary);
- pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
- colocation->id, dependent->id, primary->id, colocation->score);
+ pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score,
+ &coloc_data);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ if (colocation->score >= INFINITY) {
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ coloc_data.container_hosts, false);
+ }
+ g_list_free(coloc_data.container_hosts);
+}
- if (colocation->score < INFINITY) {
- replica->container->cmds->apply_coloc_score(dependent,
- replica->container,
- colocation, false);
+// Bundle implementation of pcmk_assignment_methods_t:with_this_colocations()
+void
+pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
+{
+ const pcmk_resource_t *bundled_rsc = NULL;
- } else {
- pe_node_t *chosen = replica->container->fns->location(replica->container,
- NULL, FALSE);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (orig_rsc != NULL) && (list != NULL));
- if ((chosen == NULL)
- || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
- continue;
- }
- if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
- && (replica->child == NULL)) {
- continue;
- }
- if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
- && (replica->child->next_role < RSC_ROLE_PROMOTED)) {
- continue;
- }
+ // The bundle itself and its containers always get its colocations
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
- pe_rsc_trace(primary, "Allowing %s: %s %d",
- colocation->id, pe__node_name(chosen), chosen->weight);
- allocated_primaries = g_list_prepend(allocated_primaries, chosen);
- }
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ return;
}
- if (colocation->score >= INFINITY) {
- node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
+ /* The bundled resource gets the colocations if it's promotable and we've
+ * begun choosing roles
+ */
+ bundled_rsc = pe__bundled_resource(rsc);
+ if ((bundled_rsc == NULL)
+ || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
+ || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
+ return;
+ }
+
+ if (orig_rsc == bundled_rsc) {
+ if (pe__clone_flag_is_set(orig_rsc,
+ pcmk__clone_promotion_constrained)) {
+ /* orig_rsc is the clone and we're setting roles (or have already
+ * done so)
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ }
+
+ } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
+ /* orig_rsc is an instance and is already assigned. If something
+ * requests colocations for orig_rsc now, it's for setting roles.
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
- g_list_free(allocated_primaries);
}
-// Bundle implementation of resource_alloc_functions_t:with_this_colocations()
+// Bundle implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__with_bundle_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ const pcmk_resource_t *bundled_rsc = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (orig_rsc != NULL) && (list != NULL));
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
+ // The bundle itself and its containers always get its colocations
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ return;
}
-}
-// Bundle implementation of resource_alloc_functions_t:this_with_colocations()
-void
-pcmk__bundle_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
-{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ /* The bundled resource gets the colocations if it's promotable and we've
+ * begun choosing roles
+ */
+ bundled_rsc = pe__bundled_resource(rsc);
+ if ((bundled_rsc == NULL)
+ || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
+ || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
+ return;
+ }
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_this_with_list(list, rsc->rsc_cons);
+ if (orig_rsc == bundled_rsc) {
+ if (pe__clone_flag_is_set(orig_rsc,
+ pcmk__clone_promotion_constrained)) {
+ /* orig_rsc is the clone and we're setting roles (or have already
+ * done so)
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ }
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
+ } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
+ /* orig_rsc is an instance and is already assigned. If something
+ * requests colocations for orig_rsc now, it's for setting roles.
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
}
-enum pe_action_flags
-pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Return action flags for a given bundle resource action
+ *
+ * \param[in,out] action Bundle resource action to get flags for
+ * \param[in] node If not NULL, limit effects to this node
+ *
+ * \return Flags appropriate to \p action on \p node
+ */
+uint32_t
+pcmk__bundle_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
GList *containers = NULL;
- enum pe_action_flags flags = 0;
- pe__bundle_variant_data_t *data = NULL;
-
- get_bundle_variant_data(data, action->rsc);
- if(data->child) {
- enum action_tasks task = get_complex_task(data->child, action->task);
- switch(task) {
- case no_action:
- case action_notify:
- case action_notified:
- case action_promote:
- case action_promoted:
- case action_demote:
- case action_demoted:
+ uint32_t flags = 0;
+ pcmk_resource_t *bundled_resource = NULL;
+
+ CRM_ASSERT((action != NULL) && (action->rsc != NULL)
+ && (action->rsc->variant == pcmk_rsc_variant_bundle));
+
+ bundled_resource = pe__bundled_resource(action->rsc);
+ if (bundled_resource != NULL) {
+ // Clone actions are done on the bundled clone resource, not container
+ switch (get_complex_task(bundled_resource, action->task)) {
+ case pcmk_action_unspecified:
+ case pcmk_action_notify:
+ case pcmk_action_notified:
+ case pcmk_action_promote:
+ case pcmk_action_promoted:
+ case pcmk_action_demote:
+ case pcmk_action_demoted:
return pcmk__collective_action_flags(action,
- data->child->children,
+ bundled_resource->children,
node);
default:
break;
@@ -579,281 +715,326 @@ pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
/*!
* \internal
- * \brief Get containerized resource corresponding to a given bundle container
+ * \brief Apply a location constraint to a bundle replica
*
- * \param[in] instance Collective instance that might be a bundle container
+ * \param[in,out] replica Replica to apply constraint to
+ * \param[in,out] user_data Location constraint to apply
*
- * \return Bundled resource instance inside \p instance if it is a bundle
- * container instance, otherwise NULL
+ * \return true (to indicate that any further replicas should be processed)
*/
-const pe_resource_t *
-pcmk__get_rsc_in_container(const pe_resource_t *instance)
+static bool
+apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data)
{
- const pe__bundle_variant_data_t *data = NULL;
- const pe_resource_t *top = pe__const_top_resource(instance, true);
+ pe__location_t *location = user_data;
- if ((top == NULL) || (top->variant != pe_container)) {
- return NULL;
+ if (replica->container != NULL) {
+ replica->container->cmds->apply_location(replica->container, location);
}
- get_bundle_variant_data(data, top);
-
- for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
- const pe__bundle_replica_t *replica = iter->data;
-
- if (instance == replica->container) {
- return replica->child;
- }
+ if (replica->ip != NULL) {
+ replica->ip->cmds->apply_location(replica->ip, location);
}
- return NULL;
+ return true;
}
+/*!
+ * \internal
+ * \brief Apply a location constraint to a bundle resource's allowed node scores
+ *
+ * \param[in,out] rsc Bundle resource to apply constraint to
+ * \param[in,out] location Location constraint to apply
+ */
void
-pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
+pcmk__bundle_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
- get_bundle_variant_data(bundle_data, rsc);
+ pcmk_resource_t *bundled_resource = NULL;
- pcmk__apply_location(rsc, constraint);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (location != NULL));
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ pcmk__apply_location(rsc, location);
+ pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);
- if (replica->container) {
- replica->container->cmds->apply_location(replica->container,
- constraint);
- }
- if (replica->ip) {
- replica->ip->cmds->apply_location(replica->ip, constraint);
- }
+ bundled_resource = pe__bundled_resource(rsc);
+ if ((bundled_resource != NULL)
+ && ((location->role_filter == pcmk_role_unpromoted)
+ || (location->role_filter == pcmk_role_promoted))) {
+ bundled_resource->cmds->apply_location(bundled_resource, location);
+ bundled_resource->rsc_location = g_list_prepend(
+ bundled_resource->rsc_location, location);
}
+}
+
+#define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']"
- if (bundle_data->child
- && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
- || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
- bundle_data->child->cmds->apply_location(bundle_data->child,
- constraint);
- bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
- constraint);
+/*!
+ * \internal
+ * \brief Add a bundle replica's actions to transition graph
+ *
+ * \param[in,out] replica Replica to add to graph
+ * \param[in] user_data Bundle that replica belongs to (for logging only)
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data)
+{
+ if ((replica->remote != NULL) && (replica->container != NULL)
+ && pe__bundle_needs_remote_name(replica->remote)) {
+
+ /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
+ * run pacemaker-remoted inside, without needing a separate IP for
+ * the container. This is done by configuring the inner remote's
+ * connection host as the magic string "#uname", then
+ * replacing it with the underlying host when needed.
+ */
+ xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
+ LOG_ERR);
+ const char *calculated_addr = NULL;
+
+ // Replace the value in replica->remote->xml (if appropriate)
+ calculated_addr = pe__add_bundle_remote_name(replica->remote,
+ replica->remote->cluster,
+ nvpair, "value");
+ if (calculated_addr != NULL) {
+ /* Since this is for the bundle as a resource, and not any
+ * particular action, replace the value in the default
+ * parameters (not evaluated for node). create_graph_action()
+ * will grab it from there to replace it in node-evaluated
+ * parameters.
+ */
+ GHashTable *params = pe_rsc_params(replica->remote,
+ NULL, replica->remote->cluster);
+
+ g_hash_table_replace(params,
+ strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
+ strdup(calculated_addr));
+ } else {
+ pcmk_resource_t *bundle = user_data;
+
+ /* The only way to get here is if the remote connection is
+ * neither currently running nor scheduled to run. That means we
+ * won't be doing any operations that require addr (only start
+ * requires it; we additionally use it to compare digests when
+ * unpacking status, promote, and migrate_from history, but
+ * that's already happened by this point).
+ */
+ pe_rsc_info(bundle,
+ "Unable to determine address for bundle %s "
+ "remote connection", bundle->id);
+ }
+ }
+ if (replica->ip != NULL) {
+ replica->ip->cmds->add_actions_to_graph(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->add_actions_to_graph(replica->container);
+ }
+ if (replica->remote != NULL) {
+ replica->remote->cmds->add_actions_to_graph(replica->remote);
}
+ return true;
}
/*!
* \internal
- * \brief Add a resource's actions to the transition graph
+ * \brief Add a bundle resource's actions to the transition graph
*
- * \param[in,out] rsc Resource whose actions should be added
+ * \param[in,out] rsc Bundle resource whose actions should be added
*/
void
-pcmk__bundle_expand(pe_resource_t *rsc)
+pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return);
+ pcmk_resource_t *bundled_resource = NULL;
- get_bundle_variant_data(bundle_data, rsc);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- if (bundle_data->child) {
- bundle_data->child->cmds->add_actions_to_graph(bundle_data->child);
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ bundled_resource->cmds->add_actions_to_graph(bundled_resource);
}
+ pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
+}
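
The g_hash_table_replace() call in add_replica_actions_to_graph() above hands ownership of two freshly strdup()'d strings to the remote connection's default parameter table. As a general GLib illustration of that ownership idiom (deliberately using no Pacemaker API, since pe_rsc_params() is internal; the example host name is invented), a table created with free() destructors releases a key/value pair on its own when the entry is replaced or the table is destroyed:

    #include <stdlib.h>
    #include <string.h>
    #include <glib.h>

    int main(void)
    {
        // The table owns its keys and values and frees them itself
        GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                   free, free);

        g_hash_table_replace(params, strdup("addr"), strdup("#uname"));
        // Replacing the entry frees the earlier "addr"/"#uname" pair
        g_hash_table_replace(params, strdup("addr"), strdup("node1.example.com"));

        g_hash_table_destroy(params);
        return 0;
    }
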
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+struct probe_data {
+ pcmk_resource_t *bundle; // Bundle being probed
+ pcmk_node_t *node; // Node to create probes on
+ bool any_created; // Whether any probes have been created
+};
- CRM_ASSERT(replica);
- if (replica->remote && replica->container
- && pe__bundle_needs_remote_name(replica->remote)) {
+/*!
+ * \internal
+ * \brief Order a bundle replica's start after another replica's probe
+ *
+ * \param[in,out] replica Replica to order start for
+ * \param[in,out] user_data Replica with probe to order after
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+order_replica_start_after(pe__bundle_replica_t *replica, void *user_data)
+{
+ pe__bundle_replica_t *probed_replica = user_data;
- /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
- * run pacemaker-remoted inside, without needing a separate IP for
- * the container. This is done by configuring the inner remote's
- * connection host as the magic string "#uname", then
- * replacing it with the underlying host when needed.
- */
- xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
- replica->remote->xml, LOG_ERR);
- const char *calculated_addr = NULL;
-
- // Replace the value in replica->remote->xml (if appropriate)
- calculated_addr = pe__add_bundle_remote_name(replica->remote,
- rsc->cluster,
- nvpair, "value");
- if (calculated_addr) {
- /* Since this is for the bundle as a resource, and not any
- * particular action, replace the value in the default
- * parameters (not evaluated for node). create_graph_action()
- * will grab it from there to replace it in node-evaluated
- * parameters.
- */
- GHashTable *params = pe_rsc_params(replica->remote,
- NULL, rsc->cluster);
-
- g_hash_table_replace(params,
- strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
- strdup(calculated_addr));
- } else {
- /* The only way to get here is if the remote connection is
- * neither currently running nor scheduled to run. That means we
- * won't be doing any operations that require addr (only start
- * requires it; we additionally use it to compare digests when
- * unpacking status, promote, and migrate_from history, but
- * that's already happened by this point).
- */
- crm_info("Unable to determine address for bundle %s remote connection",
- rsc->id);
- }
- }
- if (replica->ip) {
- replica->ip->cmds->add_actions_to_graph(replica->ip);
- }
- if (replica->container) {
- replica->container->cmds->add_actions_to_graph(replica->container);
+ if ((replica == probed_replica) || (replica->container == NULL)) {
+ return true;
+ }
+ pcmk__new_ordering(probed_replica->container,
+ pcmk__op_key(probed_replica->container->id,
+ PCMK_ACTION_MONITOR, 0),
+ NULL, replica->container,
+ pcmk__op_key(replica->container->id, PCMK_ACTION_START,
+ 0),
+ NULL, pcmk__ar_ordered|pcmk__ar_if_on_same_node,
+ replica->container->cluster);
+ return true;
+}
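
pcmk__op_key() appears throughout these callbacks to name the action being ordered or looked up; the caller owns the returned string and frees it, as create_replica_probes() below does with its probe key. A small sketch of that build/use/free pattern, assuming only the three-argument call shape used in this patch and the libpacemaker-internal headers this file already includes; the helper itself is illustrative:

    // Sketch: build, log, and release a zero-interval monitor key the same
    // way the probe code below handles probe_uuid.
    static void
    trace_monitor_key(const pcmk_resource_t *rsc)
    {
        char *key = pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0);

        crm_trace("Zero-interval monitor key for %s is %s", rsc->id, key);
        free(key);
    }
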
+
+/*!
+ * \internal
+ * \brief Create probes for a bundle replica's resources
+ *
+ * \param[in,out] replica Replica to create probes for
+ * \param[in,out] user_data struct probe_data
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+create_replica_probes(pe__bundle_replica_t *replica, void *user_data)
+{
+ struct probe_data *probe_data = user_data;
+
+ if ((replica->ip != NULL)
+ && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
+ probe_data->any_created = true;
+ }
+ if ((replica->child != NULL)
+ && pe__same_node(probe_data->node, replica->node)
+ && replica->child->cmds->create_probe(replica->child,
+ probe_data->node)) {
+ probe_data->any_created = true;
+ }
+ if ((replica->container != NULL)
+ && replica->container->cmds->create_probe(replica->container,
+ probe_data->node)) {
+ probe_data->any_created = true;
+
+ /* If we're limited to one replica per host (due to
+ * the lack of an IP range probably), then we don't
+ * want any of our peer containers starting until
+ * we've established that no other copies are already
+ * running.
+ *
+ * Partly this is to ensure that the maximum replicas per host is
+ * observed, but also to ensure that the containers
+ * don't fail to start because the necessary port
+ * mappings (which won't include an IP for uniqueness)
+ * are already taken
+ */
+ if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
+ pe__foreach_bundle_replica(probe_data->bundle,
+ order_replica_start_after, replica);
}
- if (replica->remote) {
- replica->remote->cmds->add_actions_to_graph(replica->remote);
+ }
+ if ((replica->container != NULL) && (replica->remote != NULL)
+ && replica->remote->cmds->create_probe(replica->remote,
+ probe_data->node)) {
+ /* Do not probe the remote resource until we know where the container is
+ * running. This is required for REMOTE_CONTAINER_HACK to correctly
+ * probe remote resources.
+ */
+ char *probe_uuid = pcmk__op_key(replica->remote->id,
+ PCMK_ACTION_MONITOR, 0);
+ pcmk_action_t *probe = find_first_action(replica->remote->actions,
+ probe_uuid, NULL,
+ probe_data->node);
+
+ free(probe_uuid);
+ if (probe != NULL) {
+ probe_data->any_created = true;
+ pe_rsc_trace(probe_data->bundle, "Ordering %s probe on %s",
+ replica->remote->id, pe__node_name(probe_data->node));
+ pcmk__new_ordering(replica->container,
+ pcmk__op_key(replica->container->id,
+ PCMK_ACTION_START, 0),
+ NULL, replica->remote, NULL, probe,
+ pcmk__ar_nested_remote_probe,
+ probe_data->bundle->cluster);
}
}
+ return true;
}
/*!
* \internal
*
- * \brief Schedule any probes needed for a resource on a node
+ * \brief Schedule any probes needed for a bundle resource on a node
*
- * \param[in,out] rsc Resource to create probe for
+ * \param[in,out] rsc Bundle resource to create probes for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
+pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- bool any_created = false;
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return false);
-
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if ((replica->ip != NULL)
- && replica->ip->cmds->create_probe(replica->ip, node)) {
- any_created = true;
- }
- if ((replica->child != NULL) && (node->details == replica->node->details)
- && replica->child->cmds->create_probe(replica->child, node)) {
- any_created = true;
- }
- if ((replica->container != NULL)
- && replica->container->cmds->create_probe(replica->container,
- node)) {
- any_created = true;
-
- /* If we're limited to one replica per host (due to
- * the lack of an IP range probably), then we don't
- * want any of our peer containers starting until
- * we've established that no other copies are already
- * running.
- *
- * Partly this is to ensure that nreplicas_per_host is
- * observed, but also to ensure that the containers
- * don't fail to start because the necessary port
- * mappings (which won't include an IP for uniqueness)
- * are already taken
- */
+ struct probe_data probe_data = { rsc, node, false };
- for (GList *tIter = bundle_data->replicas;
- tIter && (bundle_data->nreplicas_per_host == 1);
- tIter = tIter->next) {
- pe__bundle_replica_t *other = tIter->data;
-
- if ((other != replica) && (other != NULL)
- && (other->container != NULL)) {
-
- pcmk__new_ordering(replica->container,
- pcmk__op_key(replica->container->id, RSC_STATUS, 0),
- NULL, other->container,
- pcmk__op_key(other->container->id, RSC_START, 0),
- NULL,
- pe_order_optional|pe_order_same_node,
- rsc->cluster);
- }
- }
- }
- if ((replica->container != NULL) && (replica->remote != NULL)
- && replica->remote->cmds->create_probe(replica->remote, node)) {
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
+ return probe_data.any_created;
+}
- /* Do not probe the remote resource until we know where the
- * container is running. This is required for REMOTE_CONTAINER_HACK
- * to correctly probe remote resources.
- */
- char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
- 0);
- pe_action_t *probe = find_first_action(replica->remote->actions,
- probe_uuid, NULL, node);
-
- free(probe_uuid);
- if (probe != NULL) {
- any_created = true;
- crm_trace("Ordering %s probe on %s",
- replica->remote->id, pe__node_name(node));
- pcmk__new_ordering(replica->container,
- pcmk__op_key(replica->container->id, RSC_START, 0),
- NULL, replica->remote, NULL, probe,
- pe_order_probe, rsc->cluster);
- }
- }
+/*!
+ * \internal
+ * \brief Output actions for one bundle replica
+ *
+ * \param[in,out] replica Replica to output actions for
+ * \param[in] user_data Unused
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+output_replica_actions(pe__bundle_replica_t *replica, void *user_data)
+{
+ if (replica->ip != NULL) {
+ replica->ip->cmds->output_actions(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->output_actions(replica->container);
}
- return any_created;
+ if (replica->remote != NULL) {
+ replica->remote->cmds->output_actions(replica->remote);
+ }
+ if (replica->child != NULL) {
+ replica->child->cmds->output_actions(replica->child);
+ }
+ return true;
}
+/*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to output actions for
+ */
void
-pcmk__output_bundle_actions(pe_resource_t *rsc)
+pcmk__output_bundle_actions(pcmk_resource_t *rsc)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return);
-
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if (replica->ip != NULL) {
- replica->ip->cmds->output_actions(replica->ip);
- }
- if (replica->container != NULL) {
- replica->container->cmds->output_actions(replica->container);
- }
- if (replica->remote != NULL) {
- replica->remote->cmds->output_actions(replica->remote);
- }
- if (replica->child != NULL) {
- replica->child->cmds->output_actions(replica->child);
- }
- }
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
}
-// Bundle implementation of resource_alloc_functions_t:add_utilization()
+// Bundle implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__bundle_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
- pe__bundle_replica_t *replica = NULL;
+ pcmk_resource_t *container = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
- return;
- }
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- get_bundle_variant_data(bundle_data, rsc);
- if (bundle_data->replicas == NULL) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
@@ -861,16 +1042,17 @@ pcmk__bundle_add_utilization(const pe_resource_t *rsc,
* is sufficient for any. Only the implicit container resource can have
* utilization values.
*/
- replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
- if (replica->container != NULL) {
- replica->container->cmds->add_utilization(replica->container, orig_rsc,
- all_rscs, utilization);
+ container = pe__first_container(rsc);
+ if (container != NULL) {
+ container->cmds->add_utilization(container, orig_rsc, all_rscs,
+ utilization);
}
}
-// Bundle implementation of resource_alloc_functions_t:shutdown_lock()
+// Bundle implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
+pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc)
{
- return; // Bundles currently don't support shutdown locks
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ // Bundles currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 934f512..7b422d8 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -18,200 +18,222 @@
* \internal
* \brief Assign a clone resource's instances to nodes
*
- * \param[in,out] rsc Clone resource to assign
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Clone resource to assign
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
+ * can't be assigned to a node, set the
+ * descendant's next role to stopped and update
+ * existing actions
*
* \return NULL (clones are not assigned to a single node)
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
+ GList *colocations = NULL;
+
CRM_ASSERT(pe_rsc_is_clone(rsc));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return NULL; // Assignment has already been done
}
// Detect assignment loops
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
// If this clone is promotable, consider nodes' promotion scores
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_promotion_scores(rsc);
}
- /* If this clone is colocated with any other resources, assign those first.
- * Since the this_with_colocations() method boils down to a copy of rsc_cons
- * for clones, we can use that here directly for efficiency.
- */
- for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
+ // If this clone is colocated with any other resources, assign those first
+ colocations = pcmk__this_with_colocations(rsc);
+ for (GList *iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first",
rsc->id, constraint->id, constraint->primary->id);
- constraint->primary->cmds->assign(constraint->primary, prefer);
+ constraint->primary->cmds->assign(constraint->primary, prefer,
+ stop_if_fail);
}
+ g_list_free(colocations);
- /* If any resources are colocated with this one, consider their preferences.
- * Because the with_this_colocations() method boils down to a copy of
- * rsc_cons_lhs for clones, we can use that here directly for efficiency.
- */
- g_list_foreach(rsc->rsc_cons_lhs, pcmk__add_dependent_scores, rsc);
+ // If any resources are colocated with this one, consider their preferences
+ colocations = pcmk__with_this_colocations(rsc);
+ g_list_foreach(colocations, pcmk__add_dependent_scores, rsc);
+ g_list_free(colocations);
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
pcmk__assign_instances(rsc, rsc->children, pe__clone_max(rsc),
pe__clone_node_max(rsc));
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__set_instance_roles(rsc);
}
- pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned|pcmk_rsc_assigning);
pe_rsc_trace(rsc, "Assigned clone %s", rsc->id);
return NULL;
}
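
The \note above is the contract callers rely on: with stop_if_fail set to false the assignment is tentative, has no side effects on roles or actions, and can be rolled back wholesale. A hedged sketch of that caller pattern, using only the assign() method and the pcmk__unassign_resource() rollback named in the note; the acceptance check is a placeholder, and the exact pcmk__unassign_resource() signature should be confirmed against libpacemaker before reuse:

    // Sketch: try an assignment without committing to it, then keep or undo
    // the result. "acceptable" stands in for whatever test the caller makes.
    static void
    try_tentative_assignment(pcmk_resource_t *clone, bool acceptable)
    {
        clone->cmds->assign(clone, NULL, false);    // tentative, per the note above

        if (!acceptable) {
            pcmk__unassign_resource(clone);         // completely undo the assignment
        }
    }
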
-static pe_action_t *
-find_rsc_action(pe_resource_t *rsc, const char *task)
+/*!
+ * \internal
+ * \brief Create all actions needed for a given clone resource
+ *
+ * \param[in,out] rsc Clone resource to create actions for
+ */
+void
+pcmk__clone_create_actions(pcmk_resource_t *rsc)
{
- pe_action_t *match = NULL;
- GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
-
- for (GList *item = actions; item != NULL; item = item->next) {
- pe_action_t *op = (pe_action_t *) item->data;
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
- if (!pcmk_is_set(op->flags, pe_action_optional)) {
- if (match != NULL) {
- // More than one match, don't return any
- match = NULL;
- break;
- }
- match = op;
- }
+ pe_rsc_trace(rsc, "Creating actions for clone %s", rsc->id);
+ pcmk__create_instance_actions(rsc, rsc->children);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__create_promotable_actions(rsc);
}
- g_list_free(actions);
- return match;
}
/*!
* \internal
- * \brief Order starts and stops of an ordered clone's instances
+ * \brief Create implicit constraints needed for a clone resource
*
- * \param[in,out] rsc Clone resource
+ * \param[in,out] rsc Clone resource to create implicit constraints for
*/
-static void
-order_instance_starts_stops(pe_resource_t *rsc)
+void
+pcmk__clone_internal_constraints(pcmk_resource_t *rsc)
{
- pe_action_t *last_stop = NULL;
- pe_action_t *last_start = NULL;
+ bool ordered = false;
- // Instances must be ordered by ascending instance number, so sort them
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
+ pe_rsc_trace(rsc, "Creating internal constraints for clone %s", rsc->id);
+
+ // Restart ordering: Stop -> stopped -> start -> started
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_unrunnable_first_blocks);
+
+ // Demoted -> stop and started -> promote
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_unrunnable_first_blocks);
+ }
+
+ ordered = pe__clone_is_ordered(rsc);
+ if (ordered) {
+ /* Ordered clone instances must start and stop by instance number. The
+ * instances might have been previously shuffled for assignment or
+ * promotion purposes, so re-sort them.
+ */
+ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ }
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
- pe_action_t *action = NULL;
-
- // Order this instance's stop after previous instance's stop
- // @TODO: Should instances be stopped in reverse order instead?
- action = find_rsc_action(child, RSC_STOP);
- if (action != NULL) {
- if (last_stop != NULL) {
- order_actions(action, last_stop, pe_order_optional);
- }
- last_stop = action;
- }
-
- // Order this instance's start after previous instance's start
- action = find_rsc_action(child, RSC_START);
- if (action != NULL) {
- if (last_start != NULL) {
- order_actions(last_start, action, pe_order_optional);
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+
+ instance->cmds->internal_constraints(instance);
+
+ // Start clone -> start instance -> clone started
+ pcmk__order_starts(rsc, instance, pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Stop clone -> stop instance -> clone stopped
+ pcmk__order_stops(rsc, instance, pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
+
+ /* Instances of ordered clones must be started and stopped by instance
+ * number. Since only some instances may be starting or stopping, order
+ * each instance relative to every later instance.
+ */
+ if (ordered) {
+ for (GList *later = iter->next;
+ later != NULL; later = later->next) {
+ pcmk__order_starts(instance, (pcmk_resource_t *) later->data,
+ pcmk__ar_ordered);
+ pcmk__order_stops((pcmk_resource_t *) later->data, instance,
+ pcmk__ar_ordered);
}
- last_start = action;
}
}
-}
-
-void
-clone_create_actions(pe_resource_t *rsc)
-{
- pe_rsc_debug(rsc, "Creating actions for clone %s", rsc->id);
- pcmk__create_instance_actions(rsc, rsc->children);
- if (pe__clone_is_ordered(rsc)) {
- order_instance_starts_stops(rsc);
- }
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__create_promotable_actions(rsc);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__order_promotable_instances(rsc);
}
}
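
The nested loop above gives ordered clones their sequencing by constraining every instance against every later instance. A standalone toy program (plain C, no Pacemaker headers, invented instance names) that prints the pairs the loop would generate for a three-instance clone, to make the quadratic shape concrete:

    #include <stdio.h>

    int main(void)
    {
        const char *inst[] = { "rsc:0", "rsc:1", "rsc:2" };
        const int n = sizeof(inst) / sizeof(inst[0]);

        for (int i = 0; i < n; i++) {
            for (int j = i + 1; j < n; j++) {
                // Mirrors pcmk__order_starts(instance, later, ...) above
                printf("start %s before start %s\n", inst[i], inst[j]);
                // Mirrors pcmk__order_stops(later, instance, ...) above
                printf("stop  %s before stop  %s\n", inst[j], inst[i]);
            }
        }
        return 0;
    }
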
-void
-clone_internal_constraints(pe_resource_t *rsc)
+/*!
+ * \internal
+ * \brief Check whether colocated resources can be interleaved
+ *
+ * \param[in] colocation Colocation constraint with clone as primary
+ *
+ * \return true if colocated resources can be interleaved, otherwise false
+ */
+static bool
+can_interleave(const pcmk__colocation_t *colocation)
{
- pe_resource_t *last_rsc = NULL;
- GList *gIter;
- bool ordered = pe__clone_is_ordered(rsc);
-
- pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_runnable_left);
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_runnable_left);
+ const pcmk_resource_t *dependent = colocation->dependent;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
- pe_order_runnable_left);
+ // Only colocations between clone or bundle resources use interleaving
+ if (dependent->variant <= pcmk_rsc_variant_group) {
+ return false;
}
- if (ordered) {
- /* we have to maintain a consistent sorted child list when building order constraints */
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ // Only the dependent needs to be marked for interleaving
+ if (!crm_is_true(g_hash_table_lookup(dependent->meta,
+ XML_RSC_ATTR_INTERLEAVE))) {
+ return false;
}
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-
- child_rsc->cmds->internal_constraints(child_rsc);
-
- pcmk__order_starts(rsc, child_rsc,
- pe_order_runnable_left|pe_order_implies_first_printed);
- pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_implies_then_printed);
- if (ordered && (last_rsc != NULL)) {
- pcmk__order_starts(last_rsc, child_rsc, pe_order_optional);
- }
-
- pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed);
- pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_implies_then_printed);
- if (ordered && (last_rsc != NULL)) {
- pcmk__order_stops(child_rsc, last_rsc, pe_order_optional);
- }
- last_rsc = child_rsc;
- }
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__order_promotable_instances(rsc);
+ /* @TODO Do we actually care about multiple primary instances sharing a
+ * dependent instance?
+ */
+ if (dependent->fns->max_per_node(dependent)
+ != colocation->primary->fns->max_per_node(colocation->primary)) {
+ pcmk__config_err("Cannot interleave %s and %s because they do not "
+ "support the same number of instances per node",
+ dependent->id, colocation->primary->id);
+ return false;
}
+
+ return true;
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -220,289 +242,312 @@ clone_internal_constraints(pe_resource_t *rsc)
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
- GList *gIter = NULL;
- gboolean do_interleave = FALSE;
- const char *interleave_s = NULL;
+ const GList *iter = NULL;
/* This should never be called for the clone itself as a dependent. Instead,
* we add its colocation constraints to its instances and call the
- * apply_coloc_score() for the instances as dependents.
+ * apply_coloc_score() method for the instances as dependents.
*/
CRM_ASSERT(!for_dependent);
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
- CRM_CHECK(dependent->variant == pe_native, return);
+ CRM_ASSERT((colocation != NULL) && pe_rsc_is_clone(primary)
+ && (dependent != NULL)
+ && (dependent->variant == pcmk_rsc_variant_primitive));
- pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
- colocation->id, dependent->id, primary->id, colocation->score);
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ pe_rsc_trace(primary,
+ "Delaying processing colocation %s "
+ "because cloned primary %s is still provisional",
+ colocation->id, primary->id);
+ return;
+ }
- if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- // We haven't placed the primary yet, so we can't apply colocation
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
- return;
+ pe_rsc_trace(primary, "Processing colocation %s (%s with clone %s @%s)",
+ colocation->id, dependent->id, primary->id,
+ pcmk_readable_score(colocation->score));
- } else if (colocation->primary_role == RSC_ROLE_UNKNOWN) {
-            // This isn't a role-specific colocation, so handle normally
- pe_rsc_trace(primary, "Handling %s as a clone colocation",
- colocation->id);
+ // Apply role-specific colocations
+ if (pcmk_is_set(primary->flags, pcmk_rsc_promotable)
+ && (colocation->primary_role != pcmk_role_unknown)) {
- } else if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
- // We're placing the dependent
+ if (pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
+ // We're assigning the dependent to a node
pcmk__update_dependent_with_promotable(primary, dependent,
colocation);
return;
+ }
- } else if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
- // We're choosing roles for the dependent
+ if (colocation->dependent_role == pcmk_role_promoted) {
+ // We're choosing a role for the dependent
pcmk__update_promotable_dependent_priority(primary, dependent,
colocation);
return;
}
}
- // Only the dependent needs to be marked for interleave
- interleave_s = g_hash_table_lookup(colocation->dependent->meta,
- XML_RSC_ATTR_INTERLEAVE);
- if (crm_is_true(interleave_s)
- && (colocation->dependent->variant > pe_group)) {
- /* @TODO Do we actually care about multiple primary copies sharing a
- * dependent copy anymore?
- */
- if (copies_per_node(colocation->dependent) != copies_per_node(colocation->primary)) {
- pcmk__config_err("Cannot interleave %s and %s because they do not "
- "support the same number of instances per node",
- colocation->dependent->id,
- colocation->primary->id);
-
- } else {
- do_interleave = TRUE;
- }
- }
-
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
- return;
-
- } else if (do_interleave) {
- pe_resource_t *primary_instance = NULL;
+ // Apply interleaved colocations
+ if (can_interleave(colocation)) {
+ const pcmk_resource_t *primary_instance = NULL;
primary_instance = pcmk__find_compatible_instance(dependent, primary,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
false);
if (primary_instance != NULL) {
- pe_rsc_debug(primary, "Pairing %s with %s",
+ pe_rsc_debug(primary, "Interleaving %s with %s",
dependent->id, primary_instance->id);
dependent->cmds->apply_coloc_score(dependent, primary_instance,
colocation, true);
} else if (colocation->score >= INFINITY) {
- crm_notice("Cannot pair %s with instance of %s",
- dependent->id, primary->id);
- pcmk__assign_resource(dependent, NULL, true);
+ crm_notice("%s cannot run because it cannot interleave with "
+ "any instance of %s", dependent->id, primary->id);
+ pcmk__assign_resource(dependent, NULL, true, true);
} else {
- pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
+ pe_rsc_debug(primary,
+ "%s will not colocate with %s "
+ "because no instance can interleave with it",
dependent->id, primary->id);
}
return;
+ }
- } else if (colocation->score >= INFINITY) {
- GList *affected_nodes = NULL;
+ // Apply mandatory colocations
+ if (colocation->score >= INFINITY) {
+ GList *primary_nodes = NULL;
- gIter = primary->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
+ // Dependent can run only where primary will have unblocked instances
+ for (iter = primary->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *instance = iter->data;
+ pcmk_node_t *chosen = instance->fns->location(instance, NULL, 0);
- if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
+ if ((chosen != NULL)
+ && !is_set_recursive(instance, pcmk_rsc_blocked, TRUE)) {
pe_rsc_trace(primary, "Allowing %s: %s %d",
colocation->id, pe__node_name(chosen),
chosen->weight);
- affected_nodes = g_list_prepend(affected_nodes, chosen);
+ primary_nodes = g_list_prepend(primary_nodes, chosen);
}
}
-
- node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
- g_list_free(affected_nodes);
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ primary_nodes, false);
+ g_list_free(primary_nodes);
return;
}
- gIter = primary->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ // Apply optional colocations
+ for (iter = primary->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *instance = iter->data;
- child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
- false);
+ instance->cmds->apply_coloc_score(dependent, instance, colocation,
+ false);
}
}
-// Clone implementation of resource_alloc_functions_t:with_this_colocations()
+// Clone implementation of pcmk_assignment_methods_t:with_this_colocations()
void
-pcmk__with_clone_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
}
}
-// Clone implementation of resource_alloc_functions_t:this_with_colocations()
+// Clone implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__clone_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
}
}
-enum pe_action_flags
-clone_action_flags(pe_action_t *action, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Return action flags for a given clone resource action
+ *
+ * \param[in,out] action Action to get flags for
+ * \param[in] node If not NULL, limit effects to this node
+ *
+ * \return Flags appropriate to \p action on \p node
+ */
+uint32_t
+pcmk__clone_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
+ CRM_ASSERT((action != NULL) && pe_rsc_is_clone(action->rsc));
+
return pcmk__collective_action_flags(action, action->rsc->children, node);
}
+/*!
+ * \internal
+ * \brief Apply a location constraint to a clone resource's allowed node scores
+ *
+ * \param[in,out] rsc Clone resource to apply constraint to
+ * \param[in,out] location Location constraint to apply
+ */
void
-clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
+pcmk__clone_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
- GList *gIter = rsc->children;
+ CRM_CHECK((location != NULL) && pe_rsc_is_clone(rsc), return);
- pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
+ pcmk__apply_location(rsc, location);
- pcmk__apply_location(rsc, constraint);
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
- child_rsc->cmds->apply_location(child_rsc, constraint);
+ instance->cmds->apply_location(instance, location);
}
}
+// GFunc wrapper for calling the action_flags() resource method
+static void
+call_action_flags(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *rsc = user_data;
+
+ rsc->cmds->action_flags((pcmk_action_t *) data, NULL);
+}
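
call_action_flags() exists because g_list_foreach() expects a GFunc taking two gpointer arguments, so a small adapter is the portable replacement for casting the action_flags() method to GFunc as the removed line below did. A standalone GLib illustration of the same adapter idiom, with invented item names:

    #include <stdio.h>
    #include <glib.h>

    static void
    print_item(gpointer data, gpointer user_data)
    {
        printf("%s%s\n", (const char *) user_data, (const char *) data);
    }

    int main(void)
    {
        GList *names = NULL;

        names = g_list_append(names, "ip");
        names = g_list_append(names, "container");
        names = g_list_append(names, "remote");
        g_list_foreach(names, print_item, "replica member: ");
        g_list_free(names);
        return 0;
    }
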
+
/*!
* \internal
- * \brief Add a resource's actions to the transition graph
+ * \brief Add a clone resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
-clone_expand(pe_resource_t *rsc)
+pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc)
{
- GList *gIter = NULL;
-
- g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
+ g_list_foreach(rsc->actions, call_action_flags, rsc);
pe__create_clone_notifications(rsc);
-    /* Now that the notifications have been created we can expand the children */
-
- gIter = rsc->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
pcmk__add_rsc_actions_to_graph(rsc);
-
- /* The notifications are in the graph now, we can destroy the notify_data */
pe__free_clone_notification_data(rsc);
}
-// Check whether a resource or any of its children is known on node
+/*!
+ * \internal
+ * \brief Check whether a resource or any children have been probed on a node
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p node is in the known_on table of \p rsc or any of its
+ * children, otherwise false
+ */
static bool
-rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
+rsc_probed_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- if (rsc->children) {
+ if (rsc->children != NULL) {
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
- pe_resource_t *child = (pe_resource_t *) child_iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) child_iter->data;
- if (rsc_known_on(child, node)) {
- return TRUE;
+ if (rsc_probed_on(child, node)) {
+ return true;
}
}
+ return false;
+ }
- } else if (rsc->known_on) {
+ if (rsc->known_on != NULL) {
GHashTableIter iter;
- pe_node_t *known_node = NULL;
+ pcmk_node_t *known_node = NULL;
g_hash_table_iter_init(&iter, rsc->known_on);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
- if (node->details == known_node->details) {
- return TRUE;
+ if (pe__same_node(node, known_node)) {
+ return true;
}
}
}
- return FALSE;
+ return false;
}
-// Look for an instance of clone that is known on node
-static pe_resource_t *
-find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Find clone instance that has been probed on given node
+ *
+ * \param[in] clone Clone resource to check
+ * \param[in] node Node to check
+ *
+ * \return Instance of \p clone that has been probed on \p node if any,
+ * otherwise NULL
+ */
+static pcmk_resource_t *
+find_probed_instance_on(const pcmk_resource_t *clone, const pcmk_node_t *node)
{
- for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
- if (rsc_known_on(child, node)) {
- return child;
+ if (rsc_probed_on(instance, node)) {
+ return instance;
}
}
return NULL;
}
-// For anonymous clones, only a single instance needs to be probed
+/*!
+ * \internal
+ * \brief Probe an anonymous clone on a node
+ *
+ * \param[in,out] clone Anonymous clone to probe
+ * \param[in,out] node Node to probe \p clone on
+ */
static bool
-probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
- pe_working_set_t *data_set)
+probe_anonymous_clone(pcmk_resource_t *clone, pcmk_node_t *node)
{
- // First, check if we probed an instance on this node last time
- pe_resource_t *child = find_instance_on(rsc, node);
+ // Check whether we already probed an instance on this node
+ pcmk_resource_t *child = find_probed_instance_on(clone, node);
// Otherwise, check if we plan to start an instance on this node
- if (child == NULL) {
- for (GList *child_iter = rsc->children; child_iter && !child;
- child_iter = child_iter->next) {
-
- pe_node_t *local_node = NULL;
- pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
-
- if (child_rsc) { /* make clang analyzer happy */
- local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
- if (local_node && (local_node->details == node->details)) {
- child = child_rsc;
- }
- }
+ for (GList *iter = clone->children; (iter != NULL) && (child == NULL);
+ iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+ const pcmk_node_t *instance_node = NULL;
+
+ instance_node = instance->fns->location(instance, NULL, 0);
+ if (pe__same_node(instance_node, node)) {
+ child = instance;
}
}
// Otherwise, use the first clone instance
if (child == NULL) {
- child = rsc->children->data;
+ child = clone->children->data;
}
- CRM_ASSERT(child);
+
+ // Anonymous clones only need to probe a single instance
return child->cmds->create_probe(child, node);
}
/*!
* \internal
- *
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
@@ -511,70 +556,87 @@ probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
* \return true if any probe was created, otherwise false
*/
bool
-clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
+pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- CRM_ASSERT(rsc);
-
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
- if (rsc->children == NULL) {
- pe_warn("Clone %s has no children", rsc->id);
- return false;
- }
+ CRM_ASSERT((node != NULL) && pe_rsc_is_clone(rsc));
if (rsc->exclusive_discover) {
- pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
- if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
- /* exclusive discover is enabled and this node is not marked
- * as a node this resource should be discovered on
- *
- * remove the node from allowed_nodes so that the
- * notification contains only nodes that we might ever run
- * on
+ /* The clone is configured to be probed only where a location constraint
+ * exists with resource-discovery set to exclusive.
+ *
+ * This check is not strictly necessary here since the instance's
+ * create_probe() method would also check, but doing it here is more
+ * efficient (especially for unique clones with a large number of
+ * instances), and affects the CRM_meta_notify_available_uname variable
+ * passed with notify actions.
+ */
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
+
+ if ((allowed == NULL)
+ || (allowed->rsc_discover_mode != pcmk_probe_exclusive)) {
+ /* This node is not marked for resource discovery. Remove it from
+ * allowed_nodes so that notifications contain only nodes that the
+ * clone can possibly run on.
*/
+ pe_rsc_trace(rsc,
+ "Skipping probe for %s on %s because resource has "
+ "exclusive discovery but is not allowed on node",
+ rsc->id, pe__node_name(node));
g_hash_table_remove(rsc->allowed_nodes, node->details->id);
-
- /* Bit of a shortcut - might as well take it */
return false;
}
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
return pcmk__probe_resource_list(rsc->children, node);
} else {
- return probe_anonymous_clone(rsc, node, rsc->cluster);
+ return probe_anonymous_clone(rsc, node);
}
}
+/*!
+ * \internal
+ * \brief Add meta-attributes relevant to transition graph actions to XML
+ *
+ * Add clone-specific meta-attributes needed for transition graph actions.
+ *
+ * \param[in] rsc Clone resource whose meta-attributes should be added
+ * \param[in,out] xml Transition graph action attributes XML to add to
+ */
void
-clone_append_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
+ CRM_ASSERT(pe_rsc_is_clone(rsc) && (xml != NULL));
+
name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
- crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
+ crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pcmk_rsc_unique));
free(name);
name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
- crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
+ crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pcmk_rsc_notify));
free(name);
- name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
+ name = crm_meta_name(PCMK_META_CLONE_MAX);
crm_xml_add_int(xml, name, pe__clone_max(rsc));
free(name);
- name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
+ name = crm_meta_name(PCMK_META_CLONE_NODE_MAX);
crm_xml_add_int(xml, name, pe__clone_node_max(rsc));
free(name);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int promoted_max = pe__clone_promoted_max(rsc);
int promoted_node_max = pe__clone_promoted_node_max(rsc);
- name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
+ name = crm_meta_name(PCMK_META_PROMOTED_MAX);
crm_xml_add_int(xml, name, promoted_max);
free(name);
- name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
+ name = crm_meta_name(PCMK_META_PROMOTED_NODE_MAX);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
@@ -591,22 +653,25 @@ clone_append_meta(const pe_resource_t *rsc, xmlNode *xml)
}
}
-// Clone implementation of resource_alloc_functions_t:add_utilization()
+// Clone implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__clone_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
bool existing = false;
- pe_resource_t *child = NULL;
+ pcmk_resource_t *child = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ CRM_ASSERT(pe_rsc_is_clone(rsc) && (orig_rsc != NULL)
+ && (utilization != NULL));
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
// Look for any child already existing in the list
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- child = (pe_resource_t *) iter->data;
+ child = (pcmk_resource_t *) iter->data;
if (g_list_find(all_rscs, child)) {
existing = true; // Keep checking remaining children
} else {
@@ -614,7 +679,7 @@ pcmk__clone_add_utilization(const pe_resource_t *rsc,
for (GList *member_iter = child->children; member_iter != NULL;
member_iter = member_iter->next) {
- pe_resource_t *member = (pe_resource_t *) member_iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) member_iter->data;
if (g_list_find(all_rscs, member) != NULL) {
// Add *child's* utilization, not group member's
@@ -629,15 +694,16 @@ pcmk__clone_add_utilization(const pe_resource_t *rsc,
if (!existing && (rsc->children != NULL)) {
// If nothing was found, still add first child's utilization
- child = (pe_resource_t *) rsc->children->data;
+ child = (pcmk_resource_t *) rsc->children->data;
child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
}
}
-// Clone implementation of resource_alloc_functions_t:shutdown_lock()
+// Clone implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__clone_shutdown_lock(pe_resource_t *rsc)
+pcmk__clone_shutdown_lock(pcmk_resource_t *rsc)
{
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
return; // Clones currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index eeef4f1..733d70a 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -13,6 +13,7 @@
#include <glib.h>
#include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -21,46 +22,69 @@
#include "crm/msg_xml.h"
#include "libpacemaker_private.h"
-#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
- __rsc = pcmk__find_constraint_resource(data_set->resources, __name); \
- if (__rsc == NULL) { \
- pcmk__config_err("%s: No resource found for %s", __set, __name); \
- return; \
- } \
- } while(0)
-
// Used to temporarily mark a node as unusable
#define INFINITY_HACK (INFINITY * -100)
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on either their dependent resources or their primary
+ * resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose resource has higher priority
+ * * Colocation whose resource is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose resource is promotable, if both are clones
+ * * Colocation whose resource has lower ID in lexicographic order
+ *
+ * \param[in] colocation1 First colocation to compare
+ * \param[in] colocation2 Second colocation to compare
+ * \param[in] dependent If \c true, compare colocations by dependent
+ * priority; otherwise compare them by primary priority
+ *
+ * \return A negative number if \p colocation1 should be considered first,
+ * a positive number if \p colocation2 should be considered first,
+ * or 0 if order doesn't matter
+ */
static gint
-cmp_dependent_priority(gconstpointer a, gconstpointer b)
+cmp_colocation_priority(const pcmk__colocation_t *colocation1,
+ const pcmk__colocation_t *colocation2, bool dependent)
{
- const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
- const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
+ const pcmk_resource_t *rsc1 = NULL;
+ const pcmk_resource_t *rsc2 = NULL;
- if (a == NULL) {
+ if (colocation1 == NULL) {
return 1;
}
- if (b == NULL) {
+ if (colocation2 == NULL) {
return -1;
}
- CRM_ASSERT(rsc_constraint1->dependent != NULL);
- CRM_ASSERT(rsc_constraint1->primary != NULL);
+ if (dependent) {
+ rsc1 = colocation1->dependent;
+ rsc2 = colocation2->dependent;
+ CRM_ASSERT(colocation1->primary != NULL);
+ } else {
+ rsc1 = colocation1->primary;
+ rsc2 = colocation2->primary;
+ CRM_ASSERT(colocation1->dependent != NULL);
+ }
+ CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
- if (rsc_constraint1->dependent->priority > rsc_constraint2->dependent->priority) {
+ if (rsc1->priority > rsc2->priority) {
return -1;
}
-
- if (rsc_constraint1->dependent->priority < rsc_constraint2->dependent->priority) {
+ if (rsc1->priority < rsc2->priority) {
return 1;
}
- /* Process clones before primitives and groups */
- if (rsc_constraint1->dependent->variant > rsc_constraint2->dependent->variant) {
+ // Process clones before primitives and groups
+ if (rsc1->variant > rsc2->variant) {
return -1;
}
- if (rsc_constraint1->dependent->variant < rsc_constraint2->dependent->variant) {
+ if (rsc1->variant < rsc2->variant) {
return 1;
}
@@ -68,66 +92,70 @@ cmp_dependent_priority(gconstpointer a, gconstpointer b)
* clones (probably unnecessary, but avoids having to update regression
* tests)
*/
- if (rsc_constraint1->dependent->variant == pe_clone) {
- if (pcmk_is_set(rsc_constraint1->dependent->flags, pe_rsc_promotable)
- && !pcmk_is_set(rsc_constraint2->dependent->flags, pe_rsc_promotable)) {
+ if (rsc1->variant == pcmk_rsc_variant_clone) {
+ if (pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
+ && !pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return -1;
- } else if (!pcmk_is_set(rsc_constraint1->dependent->flags, pe_rsc_promotable)
- && pcmk_is_set(rsc_constraint2->dependent->flags, pe_rsc_promotable)) {
+ }
+ if (!pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
+ && pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return 1;
}
}
- return strcmp(rsc_constraint1->dependent->id,
- rsc_constraint2->dependent->id);
+ return strcmp(rsc1->id, rsc2->id);
}
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority based on dependents
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on their dependent resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose resource has higher priority
+ * * Colocation whose resource is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose resource is promotable, if both are clones
+ * * Colocation whose resource has lower ID in lexicographic order
+ *
+ * \param[in] a First colocation to compare
+ * \param[in] b Second colocation to compare
+ *
+ * \return A negative number if \p a should be considered first,
+ * a positive number if \p b should be considered first,
+ * or 0 if order doesn't matter
+ */
static gint
-cmp_primary_priority(gconstpointer a, gconstpointer b)
+cmp_dependent_priority(gconstpointer a, gconstpointer b)
{
- const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
- const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
-
- if (a == NULL) {
- return 1;
- }
- if (b == NULL) {
- return -1;
- }
-
- CRM_ASSERT(rsc_constraint1->dependent != NULL);
- CRM_ASSERT(rsc_constraint1->primary != NULL);
-
- if (rsc_constraint1->primary->priority > rsc_constraint2->primary->priority) {
- return -1;
- }
-
- if (rsc_constraint1->primary->priority < rsc_constraint2->primary->priority) {
- return 1;
- }
-
- /* Process clones before primitives and groups */
- if (rsc_constraint1->primary->variant > rsc_constraint2->primary->variant) {
- return -1;
- } else if (rsc_constraint1->primary->variant < rsc_constraint2->primary->variant) {
- return 1;
- }
-
- /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
- * clones (probably unnecessary, but avoids having to update regression
- * tests)
- */
- if (rsc_constraint1->primary->variant == pe_clone) {
- if (pcmk_is_set(rsc_constraint1->primary->flags, pe_rsc_promotable)
- && !pcmk_is_set(rsc_constraint2->primary->flags, pe_rsc_promotable)) {
- return -1;
- } else if (!pcmk_is_set(rsc_constraint1->primary->flags, pe_rsc_promotable)
- && pcmk_is_set(rsc_constraint2->primary->flags, pe_rsc_promotable)) {
- return 1;
- }
- }
+ return cmp_colocation_priority(a, b, true);
+}
- return strcmp(rsc_constraint1->primary->id, rsc_constraint2->primary->id);
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority based on primaries
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on their primary resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose primary has higher priority
+ * * Colocation whose primary is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose primary is promotable, if both are clones
+ * * Colocation whose primary has lower ID in lexicographic order
+ *
+ * \param[in] a First colocation to compare
+ * \param[in] b Second colocation to compare
+ *
+ * \return A negative number if \p a should be considered first,
+ * a positive number if \p b should be considered first,
+ * or 0 if order doesn't matter
+ */
+static gint
+cmp_primary_priority(gconstpointer a, gconstpointer b)
+{
+ return cmp_colocation_priority(a, b, false);
}
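
Both wrappers are plain GCompareFunc adapters around cmp_colocation_priority(), so they can be handed straight to GLib's sorted-insert routine (as pcmk__add_this_with() and pcmk__add_with_this() do below). A self-contained sketch of that pattern, using a simplified stand-in struct rather than pcmk__colocation_t:

    #include <glib.h>
    #include <stdio.h>

    typedef struct {
        const char *id;
        int priority;
    } demo_coloc_t;

    // Higher priority sorts first; ties broken by ID (simplified comparator)
    static gint
    demo_cmp(gconstpointer a, gconstpointer b)
    {
        const demo_coloc_t *c1 = a;
        const demo_coloc_t *c2 = b;

        if (c1->priority != c2->priority) {
            return (c1->priority > c2->priority)? -1 : 1;
        }
        return g_strcmp0(c1->id, c2->id);
    }

    int main(void)
    {
        demo_coloc_t a = { "colo-a", 10 };
        demo_coloc_t b = { "colo-b", 50 };
        GList *list = NULL;

        list = g_list_insert_sorted(list, &a, demo_cmp);
        list = g_list_insert_sorted(list, &b, demo_cmp);   // inserted ahead of colo-a

        for (const GList *iter = list; iter != NULL; iter = iter->next) {
            printf("%s\n", ((const demo_coloc_t *) iter->data)->id);
        }
        g_list_free(list);
        return 0;
    }
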
/*!
@@ -136,21 +164,23 @@ cmp_primary_priority(gconstpointer a, gconstpointer b)
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The list will be sorted using cmp_primary_priority().
*/
void
-pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation)
+pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc)
{
- CRM_ASSERT((list != NULL) && (colocation != NULL));
-
- crm_trace("Adding colocation %s (%s with %s%s%s @%d) "
- "to 'this with' list",
- colocation->id, colocation->dependent->id,
- colocation->primary->id,
- (colocation->node_attribute == NULL)? "" : " using ",
- pcmk__s(colocation->node_attribute, ""),
- colocation->score);
+ CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
+
+ pe_rsc_trace(rsc,
+ "Adding colocation %s (%s with %s using %s @%s) to "
+ "'this with' list for %s",
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id, colocation->node_attribute,
+ pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_primary_priority);
}
@@ -161,23 +191,30 @@ pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation)
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The lists must be pre-sorted by cmp_primary_priority().
*/
void
-pcmk__add_this_with_list(GList **list, GList *addition)
+pcmk__add_this_with_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc)
{
- CRM_CHECK((list != NULL), return);
-
- if (*list == NULL) { // Trivial case for efficiency
- crm_trace("Copying %u 'this with' colocations to new list",
- g_list_length(addition));
- *list = g_list_copy(addition);
- } else {
- while (addition != NULL) {
- pcmk__add_this_with(list, addition->data);
- addition = addition->next;
+ CRM_ASSERT((list != NULL) && (rsc != NULL));
+
+ pcmk__if_tracing(
+ {}, // Always add each colocation individually if tracing
+ {
+ if (*list == NULL) {
+ // Trivial case for efficiency if not tracing
+ *list = g_list_copy(addition);
+ return;
+ }
}
+ );
+
+ for (const GList *iter = addition; iter != NULL; iter = iter->next) {
+            pcmk__add_this_with(list, iter->data, rsc);
}
}
@@ -187,21 +224,23 @@ pcmk__add_this_with_list(GList **list, GList *addition)
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The list will be sorted using cmp_dependent_priority().
*/
void
-pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation)
+pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc)
{
- CRM_ASSERT((list != NULL) && (colocation != NULL));
-
- crm_trace("Adding colocation %s (%s with %s%s%s @%d) "
- "to 'with this' list",
- colocation->id, colocation->dependent->id,
- colocation->primary->id,
- (colocation->node_attribute == NULL)? "" : " using ",
- pcmk__s(colocation->node_attribute, ""),
- colocation->score);
+ CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
+
+ pe_rsc_trace(rsc,
+ "Adding colocation %s (%s with %s using %s @%s) to "
+ "'with this' list for %s",
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id, colocation->node_attribute,
+ pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_dependent_priority);
}
@@ -212,23 +251,30 @@ pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation)
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The lists must be pre-sorted by cmp_dependent_priority().
*/
void
-pcmk__add_with_this_list(GList **list, GList *addition)
+pcmk__add_with_this_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc)
{
- CRM_CHECK((list != NULL), return);
-
- if (*list == NULL) { // Trivial case for efficiency
- crm_trace("Copying %u 'with this' colocations to new list",
- g_list_length(addition));
- *list = g_list_copy(addition);
- } else {
- while (addition != NULL) {
- pcmk__add_with_this(list, addition->data);
- addition = addition->next;
+ CRM_ASSERT((list != NULL) && (rsc != NULL));
+
+ pcmk__if_tracing(
+ {}, // Always add each colocation individually if tracing
+ {
+ if (*list == NULL) {
+ // Trivial case for efficiency if not tracing
+ *list = g_list_copy(addition);
+ return;
+ }
}
+ );
+
+ for (const GList *iter = addition; iter != NULL; iter = iter->next) {
+            pcmk__add_with_this(list, iter->data, rsc);
}
}
@@ -242,33 +288,33 @@ pcmk__add_with_this_list(GList **list, GList *addition)
* \param[in] then_role Anti-colocation role of \p then_rsc
*/
static void
-anti_colocation_order(pe_resource_t *first_rsc, int first_role,
- pe_resource_t *then_rsc, int then_role)
+anti_colocation_order(pcmk_resource_t *first_rsc, int first_role,
+ pcmk_resource_t *then_rsc, int then_role)
{
const char *first_tasks[] = { NULL, NULL };
const char *then_tasks[] = { NULL, NULL };
/* Actions to make first_rsc lose first_role */
- if (first_role == RSC_ROLE_PROMOTED) {
- first_tasks[0] = CRMD_ACTION_DEMOTE;
+ if (first_role == pcmk_role_promoted) {
+ first_tasks[0] = PCMK_ACTION_DEMOTE;
} else {
- first_tasks[0] = CRMD_ACTION_STOP;
+ first_tasks[0] = PCMK_ACTION_STOP;
- if (first_role == RSC_ROLE_UNPROMOTED) {
- first_tasks[1] = CRMD_ACTION_PROMOTE;
+ if (first_role == pcmk_role_unpromoted) {
+ first_tasks[1] = PCMK_ACTION_PROMOTE;
}
}
/* Actions to make then_rsc gain then_role */
- if (then_role == RSC_ROLE_PROMOTED) {
- then_tasks[0] = CRMD_ACTION_PROMOTE;
+ if (then_role == pcmk_role_promoted) {
+ then_tasks[0] = PCMK_ACTION_PROMOTE;
} else {
- then_tasks[0] = CRMD_ACTION_START;
+ then_tasks[0] = PCMK_ACTION_START;
- if (then_role == RSC_ROLE_UNPROMOTED) {
- then_tasks[1] = CRMD_ACTION_DEMOTE;
+ if (then_role == pcmk_role_unpromoted) {
+ then_tasks[1] = PCMK_ACTION_DEMOTE;
}
}
@@ -280,14 +326,14 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role,
pcmk__order_resource_actions(first_rsc, first_tasks[first_lpc],
then_rsc, then_tasks[then_lpc],
- pe_order_anti_colocation);
+ pcmk__ar_if_required_on_same_node);
}
}
}
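
anti_colocation_order() is reached only for mandatory anti-colocations (scores of -INFINITY and below, as wired up in pcmk__new_colocation() below): it orders the actions that make the first resource lose its role ahead of the actions that make the second resource gain its role, with a flag that limits the effect to cases where both would be required on the same node. An illustrative configuration that takes this path (resource and constraint IDs are made up):

    <rsc_colocation id="apache-not-with-db" rsc="apache" with-rsc="db"
                    score="-INFINITY"/>
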
/*!
* \internal
- * \brief Add a new colocation constraint to a cluster working set
+ * \brief Add a new colocation constraint to scheduler data
*
* \param[in] id XML ID for this constraint
* \param[in] node_attr Colocate by this attribute (NULL for #uname)
@@ -296,40 +342,42 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role,
* \param[in,out] primary Resource to colocate \p dependent with
* \param[in] dependent_role Current role of \p dependent
* \param[in] primary_role Current role of \p primary
- * \param[in] influence Whether colocation constraint has influence
- * \param[in,out] data_set Cluster working set to add constraint to
+ * \param[in] flags Group of enum pcmk__coloc_flags
*/
void
pcmk__new_colocation(const char *id, const char *node_attr, int score,
- pe_resource_t *dependent, pe_resource_t *primary,
+ pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role, const char *primary_role,
- bool influence, pe_working_set_t *data_set)
+ uint32_t flags)
{
pcmk__colocation_t *new_con = NULL;
- if (score == 0) {
- crm_trace("Ignoring colocation '%s' because score is 0", id);
- return;
- }
+ CRM_CHECK(id != NULL, return);
+
if ((dependent == NULL) || (primary == NULL)) {
pcmk__config_err("Ignoring colocation '%s' because resource "
"does not exist", id);
return;
}
- new_con = calloc(1, sizeof(pcmk__colocation_t));
- if (new_con == NULL) {
+ if (score == 0) {
+ pe_rsc_trace(dependent,
+ "Ignoring colocation '%s' (%s with %s) because score is 0",
+ id, dependent->id, primary->id);
return;
}
- if (pcmk__str_eq(dependent_role, RSC_ROLE_STARTED_S,
+ new_con = calloc(1, sizeof(pcmk__colocation_t));
+ CRM_ASSERT(new_con != NULL);
+
+ if (pcmk__str_eq(dependent_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- dependent_role = RSC_ROLE_UNKNOWN_S;
+ dependent_role = PCMK__ROLE_UNKNOWN;
}
- if (pcmk__str_eq(primary_role, RSC_ROLE_STARTED_S,
+ if (pcmk__str_eq(primary_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- primary_role = RSC_ROLE_UNKNOWN_S;
+ primary_role = PCMK__ROLE_UNKNOWN;
}
new_con->id = id;
@@ -338,21 +386,14 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score,
new_con->score = score;
new_con->dependent_role = text2role(dependent_role);
new_con->primary_role = text2role(primary_role);
- new_con->node_attribute = node_attr;
- new_con->influence = influence;
-
- if (node_attr == NULL) {
- node_attr = CRM_ATTR_UNAME;
- }
-
- pe_rsc_trace(dependent, "%s ==> %s (%s %d)",
- dependent->id, primary->id, node_attr, score);
+ new_con->node_attribute = pcmk__s(node_attr, CRM_ATTR_UNAME);
+ new_con->flags = flags;
- pcmk__add_this_with(&(dependent->rsc_cons), new_con);
- pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con);
+ pcmk__add_this_with(&(dependent->rsc_cons), new_con, dependent);
+ pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con, primary);
- data_set->colocation_constraints = g_list_append(data_set->colocation_constraints,
- new_con);
+ dependent->cluster->colocation_constraints = g_list_prepend(
+ dependent->cluster->colocation_constraints, new_con);
if (score <= -INFINITY) {
anti_colocation_order(dependent, new_con->dependent_role, primary,
@@ -370,11 +411,12 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score,
* \param[in] rsc Resource involved in constraint (for default)
* \param[in] influence_s String value of influence option
*
- * \return true if string evaluates true, false if string evaluates false,
- * or value of resource's critical option if string is NULL or invalid
+ * \return pcmk__coloc_influence if the string evaluates to true, or if the
+ *         string is NULL or invalid and the resource's critical option
+ *         evaluates to true; otherwise pcmk__coloc_none
*/
-static bool
-unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
+static uint32_t
+unpack_influence(const char *coloc_id, const pcmk_resource_t *rsc,
const char *influence_s)
{
if (influence_s != NULL) {
@@ -385,25 +427,29 @@ unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
XML_COLOC_ATTR_INFLUENCE " (using default)",
coloc_id);
} else {
- return (influence_i != 0);
+ return (influence_i == 0)? pcmk__coloc_none : pcmk__coloc_influence;
}
}
- return pcmk_is_set(rsc->flags, pe_rsc_critical);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_critical)) {
+ return pcmk__coloc_influence;
+ }
+ return pcmk__coloc_none;
}
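
As the code above reads, the influence setting comes from the constraint itself and falls back to the dependent resource's critical option when the attribute is absent or invalid. An illustrative constraint that disables influence (IDs made up):

    <rsc_colocation id="vip-with-app" rsc="vip" with-rsc="app"
                    score="INFINITY" influence="false"/>
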
static void
unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
- const char *influence_s, pe_working_set_t *data_set)
+ const char *influence_s, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *with = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *other = NULL;
+ pcmk_resource_t *resource = NULL;
const char *set_id = ID(set);
const char *role = crm_element_value(set, "role");
- const char *ordering = crm_element_value(set, "ordering");
+ bool with_previous = false;
int local_score = score;
bool sequential = false;
-
+ uint32_t flags = pcmk__coloc_none;
+ const char *xml_rsc_id = NULL;
const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE);
if (score_s) {
@@ -415,46 +461,53 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
return;
}
- if (ordering == NULL) {
- ordering = "group";
+ /* @COMPAT The deprecated "ordering" attribute specifies whether resources
+ * in a positive-score set are colocated with the previous or next resource.
+ */
+ if (pcmk__str_eq(crm_element_value(set, "ordering"), "group",
+ pcmk__str_null_matches|pcmk__str_casei)) {
+ with_previous = true;
+ } else {
+ pe_warn_once(pcmk__wo_set_ordering,
+ "Support for 'ordering' other than 'group' in "
+ XML_CONS_TAG_RSC_SET " (such as %s) is deprecated and "
+ "will be removed in a future release", set_id);
}
- if (pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok && !sequential) {
+ if ((pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok)
+ && !sequential) {
return;
+ }
- } else if ((local_score > 0)
- && pcmk__str_eq(ordering, "group", pcmk__str_casei)) {
+ if (local_score > 0) {
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- if (with != NULL) {
- pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id);
- pcmk__new_colocation(set_id, NULL, local_score, resource,
- with, role, role,
- unpack_influence(coloc_id, resource,
- influence_s), data_set);
+ xml_rsc_id = ID(xml_rsc);
+ resource = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (resource == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring %s and later resources in set %s: "
+ "No such resource", xml_rsc_id, set_id);
+ return;
}
- with = resource;
- }
-
- } else if (local_score > 0) {
- pe_resource_t *last = NULL;
-
- for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
- xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
-
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- if (last != NULL) {
- pe_rsc_trace(resource, "Colocating %s with %s",
- last->id, resource->id);
- pcmk__new_colocation(set_id, NULL, local_score, last,
- resource, role, role,
- unpack_influence(coloc_id, last,
- influence_s), data_set);
+ if (other != NULL) {
+ flags = pcmk__coloc_explicit
+ | unpack_influence(coloc_id, resource, influence_s);
+ if (with_previous) {
+ pe_rsc_trace(resource, "Colocating %s with %s in set %s",
+ resource->id, other->id, set_id);
+ pcmk__new_colocation(set_id, NULL, local_score, resource,
+ other, role, role, flags);
+ } else {
+ pe_rsc_trace(resource, "Colocating %s with %s in set %s",
+ other->id, resource->id, set_id);
+ pcmk__new_colocation(set_id, NULL, local_score, other,
+ resource, role, role, flags);
+ }
}
-
- last = resource;
+ other = resource;
}
} else {
@@ -467,117 +520,187 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_with = NULL;
- bool influence = true;
-
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- influence = unpack_influence(coloc_id, resource, influence_s);
+ xml_rsc_id = ID(xml_rsc);
+ resource = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (resource == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring %s and later resources in set %s: "
+ "No such resource", xml_rsc_id, set_id);
+ return;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(coloc_id, resource, influence_s);
for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc_with != NULL;
xml_rsc_with = crm_next_same_xml(xml_rsc_with)) {
- if (pcmk__str_eq(resource->id, ID(xml_rsc_with),
- pcmk__str_casei)) {
+ xml_rsc_id = ID(xml_rsc_with);
+ if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) {
break;
}
- EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with));
- pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id,
- with->id);
+ other = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ CRM_ASSERT(other != NULL); // We already processed it
pcmk__new_colocation(set_id, NULL, local_score,
- resource, with, role, role,
- influence, data_set);
+ resource, other, role, role, flags);
}
}
}
}
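
For reference, the input this function unpacks is a resource_set inside a rsc_colocation. With a positive score and the default ordering, each listed resource is made a dependent of the resource listed before it. An illustrative example (IDs made up):

    <rsc_colocation id="web-stack" score="INFINITY">
      <resource_set id="web-stack-set" sequential="true">
        <resource_ref id="ip"/>
        <resource_ref id="apache"/>
      </resource_set>
    </rsc_colocation>

Here apache is colocated with ip; setting sequential="false" would instead skip the within-set colocations, leaving only the relationships between sets (handled by colocate_rsc_sets() below).
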
+/*!
+ * \internal
+ * \brief Colocate two resource sets relative to each other
+ *
+ * \param[in] id Colocation XML ID
+ * \param[in] set1 Dependent set
+ * \param[in] set2 Primary set
+ * \param[in] score Colocation score
+ * \param[in] influence_s Value of colocation's "influence" attribute
+ * \param[in,out] scheduler Scheduler data
+ */
static void
-colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score,
- const char *influence_s, pe_working_set_t *data_set)
+colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
+ int score, const char *influence_s,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *rsc_1 = NULL;
- pe_resource_t *rsc_2 = NULL;
+ pcmk_resource_t *rsc_1 = NULL;
+ pcmk_resource_t *rsc_2 = NULL;
+ const char *xml_rsc_id = NULL;
const char *role_1 = crm_element_value(set1, "role");
const char *role_2 = crm_element_value(set2, "role");
int rc = pcmk_rc_ok;
bool sequential = false;
+ uint32_t flags = pcmk__coloc_none;
if (score == 0) {
- crm_trace("Ignoring colocation '%s' between sets because score is 0",
- id);
+ crm_trace("Ignoring colocation '%s' between sets %s and %s "
+ "because score is 0", id, ID(set1), ID(set2));
return;
}
rc = pcmk__xe_get_bool_attr(set1, "sequential", &sequential);
- if (rc != pcmk_rc_ok || sequential) {
+ if ((rc != pcmk_rc_ok) || sequential) {
// Get the first one
xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
if (xml_rsc != NULL) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s with set %s "
+ "because first resource %s not found",
+ ID(set1), ID(set2), xml_rsc_id);
+ return;
+ }
}
}
rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential);
- if (rc != pcmk_rc_ok || sequential) {
+ if ((rc != pcmk_rc_ok) || sequential) {
// Get the last one
- const char *rid = NULL;
-
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- rid = ID(xml_rsc);
+ xml_rsc_id = ID(xml_rsc);
+ }
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s with set %s "
+ "because last resource %s not found",
+ ID(set1), ID(set2), xml_rsc_id);
+ return;
}
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid);
}
- if ((rsc_1 != NULL) && (rsc_2 != NULL)) {
+ if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential
+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
- unpack_influence(id, rsc_1, influence_s),
- data_set);
-
- } else if (rsc_1 != NULL) {
- bool influence = unpack_influence(id, rsc_1, influence_s);
+ flags);
+ } else if (rsc_1 != NULL) { // Only set1 is sequential
+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring set %s colocation with resource %s "
+ "in set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
- role_2, influence, data_set);
+ role_2, flags);
}
- } else if (rsc_2 != NULL) {
+ } else if (rsc_2 != NULL) { // Only set2 is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource %s "
+ "with set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
- role_2,
- unpack_influence(id, rsc_1, influence_s),
- data_set);
+ role_2, flags);
}
- } else {
+ } else { // Neither set is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_2 = NULL;
- bool influence = true;
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
- influence = unpack_influence(id, rsc_1, influence_s);
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource %s "
+ "with set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL;
xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
+ xml_rsc_id = ID(xml_rsc_2);
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource "
+ "%s with set %s resource %s: No such "
+ "resource", ID(set1), ID(xml_rsc),
+ ID(set2), xml_rsc_id);
+ continue;
+ }
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
- role_1, role_2, influence,
- data_set);
+ role_1, role_2, flags);
}
}
}
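
colocate_rsc_sets() handles the relationship between two consecutive sets in the same constraint. When both sets are sequential, only the first member of the earlier (dependent) set is colocated with the last member of the later (primary) set; non-sequential sets fall back to member-by-member colocations. An illustrative two-set constraint (IDs made up):

    <rsc_colocation id="stacks" score="INFINITY">
      <resource_set id="stacks-set-1" sequential="true">
        <resource_ref id="ip"/>
        <resource_ref id="apache"/>
      </resource_set>
      <resource_set id="stacks-set-2" sequential="true">
        <resource_ref id="db"/>
        <resource_ref id="backup"/>
      </resource_set>
    </rsc_colocation>

In addition to the within-set colocations, ip (first member of set 1) is made dependent on backup (last member of set 2).
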
@@ -585,9 +708,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score,
static void
unpack_simple_colocation(xmlNode *xml_obj, const char *id,
- const char *influence_s, pe_working_set_t *data_set)
+ const char *influence_s, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
+ uint32_t flags = pcmk__coloc_none;
const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
const char *dependent_id = crm_element_value(xml_obj,
@@ -599,26 +723,27 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id,
XML_COLOC_ATTR_TARGET_ROLE);
const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR);
- // @COMPAT: Deprecated since 2.1.5
- const char *dependent_instance = crm_element_value(xml_obj,
- XML_COLOC_ATTR_SOURCE_INSTANCE);
- // @COMPAT: Deprecated since 2.1.5
- const char *primary_instance = crm_element_value(xml_obj,
- XML_COLOC_ATTR_TARGET_INSTANCE);
+ const char *primary_instance = NULL;
+ const char *dependent_instance = NULL;
+ pcmk_resource_t *primary = NULL;
+ pcmk_resource_t *dependent = NULL;
- pe_resource_t *dependent = pcmk__find_constraint_resource(data_set->resources,
- dependent_id);
- pe_resource_t *primary = pcmk__find_constraint_resource(data_set->resources,
- primary_id);
+ primary = pcmk__find_constraint_resource(scheduler->resources, primary_id);
+ dependent = pcmk__find_constraint_resource(scheduler->resources,
+ dependent_id);
+ // @COMPAT: Deprecated since 2.1.5
+ primary_instance = crm_element_value(xml_obj,
+ XML_COLOC_ATTR_TARGET_INSTANCE);
+ dependent_instance = crm_element_value(xml_obj,
+ XML_COLOC_ATTR_SOURCE_INSTANCE);
if (dependent_instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
-
if (primary_instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_TARGET_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
@@ -676,15 +801,15 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id,
score_i = char2score(score);
}
+ flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s);
pcmk__new_colocation(id, attr, score_i, dependent, primary,
- dependent_role, primary_role,
- unpack_influence(id, dependent, influence_s), data_set);
+ dependent_role, primary_role, flags);
}
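
unpack_simple_colocation() covers the non-set form of the constraint. An illustrative example using a role and a custom node attribute (IDs and the "site" attribute are made up):

    <rsc_colocation id="vip-with-promoted-db" rsc="vip" with-rsc="db"
                    with-rsc-role="Promoted" node-attribute="site"
                    score="INFINITY"/>
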
// \return Standard Pacemaker return code
static int
unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *dependent_id = NULL;
@@ -692,11 +817,11 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
const char *dependent_role = NULL;
const char *primary_role = NULL;
- pe_resource_t *dependent = NULL;
- pe_resource_t *primary = NULL;
+ pcmk_resource_t *dependent = NULL;
+ pcmk_resource_t *primary = NULL;
- pe_tag_t *dependent_tag = NULL;
- pe_tag_t *primary_tag = NULL;
+ pcmk_tag_t *dependent_tag = NULL;
+ pcmk_tag_t *primary_tag = NULL;
xmlNode *dependent_set = NULL;
xmlNode *primary_set = NULL;
@@ -709,12 +834,12 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
return pcmk_rc_ok;
@@ -726,14 +851,14 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, dependent_id, &dependent,
+ if (!pcmk__valid_resource_or_tag(scheduler, dependent_id, &dependent,
&dependent_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, dependent_id);
return pcmk_rc_unpack_error;
}
- if (!pcmk__valid_resource_or_tag(data_set, primary_id, &primary,
+ if (!pcmk__valid_resource_or_tag(scheduler, primary_id, &primary,
&primary_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, primary_id);
@@ -757,9 +882,9 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under constraint
+ // Convert dependent's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &dependent_set, XML_COLOC_ATTR_SOURCE,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -774,9 +899,9 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
any_sets = true;
}
- // Convert template/tag reference in "with-rsc" into resource_set under constraint
+ // Convert primary's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &primary_set, XML_COLOC_ATTR_TARGET,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -803,13 +928,13 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
/*!
* \internal
- * \brief Parse a colocation constraint from XML into a cluster working set
+ * \brief Parse a colocation constraint from XML into scheduler data
*
- * \param[in,out] xml_obj Colocation constraint XML to unpack
- * \param[in,out] data_set Cluster working set to add constraint to
+ * \param[in,out] xml_obj Colocation constraint XML to unpack
+ * \param[in,out] scheduler Scheduler data to add constraint to
*/
void
-pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
xmlNode *set = NULL;
@@ -819,27 +944,34 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
xmlNode *expanded_xml = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
- const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
- const char *influence_s = crm_element_value(xml_obj,
- XML_COLOC_ATTR_INFLUENCE);
+ const char *score = NULL;
+ const char *influence_s = NULL;
- if (score) {
- score_i = char2score(score);
+ if (pcmk__str_empty(id)) {
+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_DEPEND
+ " without " CRM_ATTR_ID);
+ return;
}
if (unpack_colocation_tags(xml_obj, &expanded_xml,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return;
}
- if (expanded_xml) {
+ if (expanded_xml != NULL) {
orig_xml = xml_obj;
xml_obj = expanded_xml;
}
+ score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
+ if (score != NULL) {
+ score_i = char2score(score);
+ }
+ influence_s = crm_element_value(xml_obj, XML_COLOC_ATTR_INFLUENCE);
+
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
set = crm_next_same_xml(set)) {
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if (set == NULL) { // Configuration error, message already logged
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -847,10 +979,15 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
return;
}
- unpack_colocation_set(set, score_i, id, influence_s, data_set);
+ if (pcmk__str_empty(ID(set))) {
+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET
+ " without " CRM_ATTR_ID);
+ continue;
+ }
+ unpack_colocation_set(set, score_i, id, influence_s, scheduler);
if (last != NULL) {
- colocate_rsc_sets(id, last, set, score_i, influence_s, data_set);
+ colocate_rsc_sets(id, last, set, score_i, influence_s, scheduler);
}
last = set;
}
@@ -861,7 +998,7 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (last == NULL) {
- unpack_simple_colocation(xml_obj, id, influence_s, data_set);
+ unpack_simple_colocation(xml_obj, id, influence_s, scheduler);
}
}
@@ -874,27 +1011,28 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in] reason Unrunnable start action causing the block
*/
static void
-mark_action_blocked(pe_resource_t *rsc, const char *task,
- const pe_resource_t *reason)
+mark_action_blocked(pcmk_resource_t *rsc, const char *task,
+ const pcmk_resource_t *reason)
{
+ GList *iter = NULL;
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
- for (GList *gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
- if (pcmk_is_set(action->flags, pe_action_runnable)
- && pcmk__str_eq(action->task, task, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)
+ && pcmk__str_eq(action->task, task, pcmk__str_none)) {
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, reason_text, false);
- pcmk__block_colocation_dependents(action, rsc->cluster);
+ pcmk__block_colocation_dependents(action);
pcmk__update_action_for_orderings(action, rsc->cluster);
}
}
// If parent resource can't perform an action, neither can any children
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- mark_action_blocked((pe_resource_t *) (iter->data), task, reason);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ mark_action_blocked((pcmk_resource_t *) (iter->data), task, reason);
}
free(reason_text);
}
@@ -907,24 +1045,23 @@ mark_action_blocked(pe_resource_t *rsc, const char *task,
* promote actions of resources colocated with it, as appropriate to the
* colocations' configured roles.
*
- * \param[in,out] action Action to check
- * \param[in] data_set Cluster working set (ignored)
+ * \param[in,out] action Action to check
*/
void
-pcmk__block_colocation_dependents(pe_action_t *action,
- pe_working_set_t *data_set)
+pcmk__block_colocation_dependents(pcmk_action_t *action)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
GList *colocations = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
bool is_start = false;
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
return; // Only unrunnable actions block dependents
}
- is_start = pcmk__str_eq(action->task, RSC_START, pcmk__str_none);
- if (!is_start && !pcmk__str_eq(action->task, RSC_PROMOTE, pcmk__str_none)) {
+ is_start = pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none);
+ if (!is_start
+ && !pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
return; // Only unrunnable starts and promotes block dependents
}
@@ -940,13 +1077,13 @@ pcmk__block_colocation_dependents(pe_action_t *action,
}
// Colocation fails only if entire primary can't reach desired role
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
- pe_action_t *child_action = find_first_action(child->actions, NULL,
- action->task, NULL);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child = iter->data;
+ pcmk_action_t *child_action = find_first_action(child->actions, NULL,
+ action->task, NULL);
if ((child_action == NULL)
- || pcmk_is_set(child_action->flags, pe_action_runnable)) {
+ || pcmk_is_set(child_action->flags, pcmk_action_runnable)) {
crm_trace("Not blocking %s colocation dependents because "
"at least %s has runnable %s",
rsc->id, child->id, action->task);
@@ -959,8 +1096,8 @@ pcmk__block_colocation_dependents(pe_action_t *action,
// Check each colocation where this resource is primary
colocations = pcmk__with_this_colocations(rsc);
- for (gIter = colocations; gIter != NULL; gIter = gIter->next) {
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) gIter->data;
+ for (iter = colocations; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
if (colocation->score < INFINITY) {
continue; // Only mandatory colocations block dependent
@@ -972,16 +1109,17 @@ pcmk__block_colocation_dependents(pe_action_t *action,
* If the primary can't be promoted, the dependent can't reach its
* colocated role if the primary's colocation role is promoted.
*/
- if (!is_start && (colocation->primary_role != RSC_ROLE_PROMOTED)) {
+ if (!is_start && (colocation->primary_role != pcmk_role_promoted)) {
continue;
}
// Block the dependent from reaching its colocated role
- if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
- mark_action_blocked(colocation->dependent, RSC_PROMOTE,
+ if (colocation->dependent_role == pcmk_role_promoted) {
+ mark_action_blocked(colocation->dependent, PCMK_ACTION_PROMOTE,
action->rsc);
} else {
- mark_action_blocked(colocation->dependent, RSC_START, action->rsc);
+ mark_action_blocked(colocation->dependent, PCMK_ACTION_START,
+ action->rsc);
}
}
g_list_free(colocations);
@@ -989,6 +1127,37 @@ pcmk__block_colocation_dependents(pe_action_t *action,
/*!
* \internal
+ * \brief Get the resource to use for role comparisons
+ *
+ * A bundle replica includes a container and possibly an instance of the bundled
+ * resource. The dependent in a "with bundle" colocation is colocated with a
+ * particular bundle container. However, if the colocation includes a role, then
+ * the role must be checked on the bundled resource instance inside the
+ * container. The container itself will never be promoted; the bundled resource
+ * may be.
+ *
+ * If the given resource is a bundle replica container, return the resource
+ * inside it, if any. Otherwise, return the resource itself.
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Resource to use for role comparisons
+ */
+static const pcmk_resource_t *
+get_resource_for_role(const pcmk_resource_t *rsc)
+{
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_replica_container)) {
+ const pcmk_resource_t *child = pe__get_rsc_in_container(rsc);
+
+ if (child != NULL) {
+ return child;
+ }
+ }
+ return rsc;
+}
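
A concrete case for get_resource_for_role(): colocating a resource with a bundle by role. In a constraint like the following (IDs made up), the Promoted role is evaluated on the bundled resource instance running inside the chosen replica's container, since the container itself is never promoted:

    <rsc_colocation id="vip-with-db-bundle" rsc="vip" with-rsc="db-bundle"
                    with-rsc-role="Promoted" score="INFINITY"/>
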
+
+/*!
+ * \internal
* \brief Determine how a colocation constraint should affect a resource
*
* Colocation constraints have different effects at different points in the
@@ -1001,39 +1170,48 @@ pcmk__block_colocation_dependents(pe_action_t *action,
* \param[in] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
- * \param[in] preview If true, pretend resources have already been allocated
+ * \param[in] preview If true, pretend resources have already been assigned
*
* \return How colocation constraint should be applied at this point
*/
enum pcmk__coloc_affects
-pcmk__colocation_affects(const pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__colocation_affects(const pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation, bool preview)
{
- if (!preview && pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- // Primary resource has not been allocated yet, so we can't do anything
+ const pcmk_resource_t *dependent_role_rsc = NULL;
+ const pcmk_resource_t *primary_role_rsc = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
+
+ if (!preview && pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ // Primary resource has not been assigned yet, so we can't do anything
return pcmk__coloc_affects_nothing;
}
- if ((colocation->dependent_role >= RSC_ROLE_UNPROMOTED)
- && (dependent->parent != NULL)
- && pcmk_is_set(dependent->parent->flags, pe_rsc_promotable)
- && !pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
+ dependent_role_rsc = get_resource_for_role(dependent);
+ primary_role_rsc = get_resource_for_role(primary);
+
+ if ((colocation->dependent_role >= pcmk_role_unpromoted)
+ && (dependent_role_rsc->parent != NULL)
+ && pcmk_is_set(dependent_role_rsc->parent->flags, pcmk_rsc_promotable)
+ && !pcmk_is_set(dependent_role_rsc->flags, pcmk_rsc_unassigned)) {
/* This is a colocation by role, and the dependent is a promotable clone
- * that has already been allocated, so the colocation should now affect
+ * that has already been assigned, so the colocation should now affect
* the role.
*/
return pcmk__coloc_affects_role;
}
- if (!preview && !pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
- /* The dependent resource has already been through allocation, so the
+ if (!preview && !pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
+ /* The dependent resource has already been through assignment, so the
* constraint no longer has any effect. Log an error if a mandatory
* colocation constraint has been violated.
*/
- const pe_node_t *primary_node = primary->allocated_to;
+ const pcmk_node_t *primary_node = primary->allocated_to;
if (dependent->allocated_to == NULL) {
crm_trace("Skipping colocation '%s': %s will not run anywhere",
@@ -1042,8 +1220,7 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
} else if (colocation->score >= INFINITY) {
// Dependent resource must colocate with primary resource
- if ((primary_node == NULL) ||
- (primary_node->details != dependent->allocated_to->details)) {
+ if (!pe__same_node(primary_node, dependent->allocated_to)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
dependent->id, primary->id,
pe__node_name(dependent->allocated_to),
@@ -1053,51 +1230,35 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
} else if (colocation->score <= -CRM_SCORE_INFINITY) {
// Dependent resource must anti-colocate with primary resource
- if ((primary_node != NULL) &&
- (dependent->allocated_to->details == primary_node->details)) {
- crm_err("%s and %s must be anti-colocated but are allocated "
+ if (pe__same_node(dependent->allocated_to, primary_node)) {
+ crm_err("%s and %s must be anti-colocated but are assigned "
"to the same node (%s)",
- dependent->id, primary->id, pe__node_name(primary_node));
+ dependent->id, primary->id,
+ pe__node_name(primary_node));
}
}
return pcmk__coloc_affects_nothing;
}
- if ((colocation->score > 0)
- && (colocation->dependent_role != RSC_ROLE_UNKNOWN)
- && (colocation->dependent_role != dependent->next_role)) {
+ if ((colocation->dependent_role != pcmk_role_unknown)
+ && (colocation->dependent_role != dependent_role_rsc->next_role)) {
+ crm_trace("Skipping %scolocation '%s': dependent limited to %s role "
- crm_trace("Skipping colocation '%s': dependent limited to %s role "
"but %s next role is %s",
+ ((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->dependent_role),
- dependent->id, role2text(dependent->next_role));
+ dependent_role_rsc->id,
+ role2text(dependent_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
- if ((colocation->score > 0)
- && (colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role != primary->next_role)) {
-
- crm_trace("Skipping colocation '%s': primary limited to %s role "
+ if ((colocation->primary_role != pcmk_role_unknown)
+ && (colocation->primary_role != primary_role_rsc->next_role)) {
+ crm_trace("Skipping %scolocation '%s': primary limited to %s role "
"but %s next role is %s",
+ ((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->primary_role),
- primary->id, role2text(primary->next_role));
- return pcmk__coloc_affects_nothing;
- }
-
- if ((colocation->score < 0)
- && (colocation->dependent_role != RSC_ROLE_UNKNOWN)
- && (colocation->dependent_role == dependent->next_role)) {
- crm_trace("Skipping anti-colocation '%s': dependent role %s matches",
- colocation->id, role2text(colocation->dependent_role));
- return pcmk__coloc_affects_nothing;
- }
-
- if ((colocation->score < 0)
- && (colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role == primary->next_role)) {
- crm_trace("Skipping anti-colocation '%s': primary role %s matches",
- colocation->id, role2text(colocation->primary_role));
+ primary_role_rsc->id, role2text(primary_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
@@ -1106,32 +1267,29 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
/*!
* \internal
- * \brief Apply colocation to dependent for allocation purposes
+ * \brief Apply colocation to dependent for assignment purposes
*
- * Update the allowed node weights of the dependent resource in a colocation,
- * for the purposes of allocating it to a node
+ * Update the allowed node scores of the dependent resource in a colocation,
+ * for the purposes of assigning it to a node.
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
-pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation)
+pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation)
{
- const char *attribute = CRM_ATTR_ID;
+ const char *attr = colocation->node_attribute;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (colocation->node_attribute != NULL) {
- attribute = colocation->node_attribute;
- }
+ pcmk_node_t *node = NULL;
if (primary->allocated_to != NULL) {
- value = pe_node_attribute_raw(primary->allocated_to, attribute);
+ value = pcmk__colocation_node_attr(primary->allocated_to, attr,
+ primary);
} else if (colocation->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
@@ -1150,9 +1308,12 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
colocation->id, dependent->id, pe__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score), primary->id);
+ continue;
+ }
+
+ if (pcmk__str_eq(pcmk__colocation_node_attr(node, attr, dependent),
+ value, pcmk__str_casei)) {
- } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value,
- pcmk__str_casei)) {
/* Add colocation score only if optional (or minus infinity). A
* mandatory colocation is a requirement rather than a preference,
* so we don't need to consider it for relative assignment purposes.
@@ -1169,8 +1330,10 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score));
}
+ continue;
+ }
- } else if (colocation->score >= CRM_SCORE_INFINITY) {
+ if (colocation->score >= CRM_SCORE_INFINITY) {
/* Only mandatory colocations are relevant when the colocation
* attribute doesn't match, because an attribute not matching is not
* a negative preference -- the colocation is simply relevant only
@@ -1181,7 +1344,7 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
"Banned %s from %s because colocation %s attribute %s "
"does not match",
dependent->id, pe__node_name(node), colocation->id,
- attribute);
+ attr);
}
}
@@ -1215,40 +1378,45 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
* \param[in] colocation Colocation constraint
*/
void
-pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *dependent_value = NULL;
const char *primary_value = NULL;
- const char *attribute = CRM_ATTR_ID;
+ const char *attr = colocation->node_attribute;
int score_multiplier = 1;
+ const pcmk_resource_t *primary_role_rsc = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL) &&
+ (colocation != NULL));
+
if ((primary->allocated_to == NULL) || (dependent->allocated_to == NULL)) {
return;
}
- if (colocation->node_attribute != NULL) {
- attribute = colocation->node_attribute;
- }
+ dependent_value = pcmk__colocation_node_attr(dependent->allocated_to, attr,
+ dependent);
+ primary_value = pcmk__colocation_node_attr(primary->allocated_to, attr,
+ primary);
- dependent_value = pe_node_attribute_raw(dependent->allocated_to, attribute);
- primary_value = pe_node_attribute_raw(primary->allocated_to, attribute);
+ primary_role_rsc = get_resource_for_role(primary);
if (!pcmk__str_eq(dependent_value, primary_value, pcmk__str_casei)) {
if ((colocation->score == INFINITY)
- && (colocation->dependent_role == RSC_ROLE_PROMOTED)) {
+ && (colocation->dependent_role == pcmk_role_promoted)) {
dependent->priority = -INFINITY;
}
return;
}
- if ((colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role != primary->next_role)) {
+ if ((colocation->primary_role != pcmk_role_unknown)
+ && (colocation->primary_role != primary_role_rsc->next_role)) {
return;
}
- if (colocation->dependent_role == RSC_ROLE_UNPROMOTED) {
+ if (colocation->dependent_role == pcmk_role_unpromoted) {
score_multiplier = -1;
}
@@ -1271,11 +1439,11 @@ pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
* \param[in] value Colocation attribute value to require
*/
static int
-best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
+best_node_score_matching_attr(const pcmk_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
@@ -1283,15 +1451,17 @@ best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
- if ((node->weight > best_score) && pcmk__node_available(node, false, false)
- && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
+ if ((node->weight > best_score)
+ && pcmk__node_available(node, false, false)
+ && pcmk__str_eq(value, pcmk__colocation_node_attr(node, attr, rsc),
+ pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
- if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
+ if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_none)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
@@ -1306,50 +1476,113 @@ best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
/*!
* \internal
- * \brief Add resource's colocation matches to current node allocation scores
+ * \brief Check whether a resource is allowed only on a single node
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return \c true if \p rsc is allowed only on one node, otherwise \c false
+ */
+static bool
+allowed_on_one(const pcmk_resource_t *rsc)
+{
+ GHashTableIter iter;
+ pcmk_node_t *allowed_node = NULL;
+ int allowed_nodes = 0;
+
+ g_hash_table_iter_init(&iter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &allowed_node)) {
+ if ((allowed_node->weight >= 0) && (++allowed_nodes > 1)) {
+ pe_rsc_trace(rsc, "%s is allowed on multiple nodes", rsc->id);
+ return false;
+ }
+ }
+ pe_rsc_trace(rsc, "%s is allowed %s", rsc->id,
+ ((allowed_nodes == 1)? "on a single node" : "nowhere"));
+ return (allowed_nodes == 1);
+}
+
+/*!
+ * \internal
+ * \brief Add resource's colocation matches to current node assignment scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
- * \param[in,out] nodes Hash table of nodes with allocation scores so far
- * \param[in] rsc Resource whose allowed nodes should be compared
- * \param[in] attr Colocation attribute that must match (NULL for default)
- * \param[in] factor Factor by which to multiply scores being added
+ * \param[in,out] nodes Table of nodes with assignment scores so far
+ * \param[in] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p nodes
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; pass NULL to
+ * ignore stickiness and use default attribute)
+ * \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
-add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
- const char *attr, float factor,
- bool only_positive)
+add_node_scores_matching_attr(GHashTable *nodes,
+ const pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const pcmk__colocation_t *colocation,
+ float factor, bool only_positive)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (attr == NULL) {
- attr = CRM_ATTR_UNAME;
- }
+ pcmk_node_t *node = NULL;
+ const char *attr = colocation->node_attribute;
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
- float weight_f = 0;
- int weight = 0;
+ float delta_f = 0;
+ int delta = 0;
int score = 0;
int new_score = 0;
+ const char *value = pcmk__colocation_node_attr(node, attr, target_rsc);
- score = best_node_score_matching_attr(rsc, attr,
- pe_node_attribute_raw(node, attr));
+ score = best_node_score_matching_attr(source_rsc, attr, value);
if ((factor < 0) && (score < 0)) {
- /* Negative preference for a node with a negative score
- * should not become a positive preference.
+ /* If the dependent is anti-colocated, we generally don't want the
+ * primary to prefer nodes that the dependent avoids. That could
+ * lead to unnecessary shuffling of the primary when the dependent
+ * hits its migration threshold somewhere, for example.
+ *
+ * However, there are cases when it is desirable. If the dependent
+ * can't run anywhere but where the primary is, it would be
+ * worthwhile to move the primary for the sake of keeping the
+ * dependent active.
+ *
+ * We can't know that exactly at this point since we don't know
+ * where the primary will be assigned, but we can limit considering
+ * the preference to when the dependent is allowed only on one node.
+ * This is less than ideal for multiple reasons:
+ *
+ * - the dependent could be allowed on more than one node but have
+ * anti-colocation primaries on each;
+ * - the dependent could be a clone or bundle with multiple
+ * instances, and the dependent as a whole is allowed on multiple
+ * nodes but some instance still can't run
+ * - the dependent has considered node-specific criteria such as
+ * location constraints and stickiness by this point, but might
+ * have other factors that end up disallowing a node
+ *
+ * but the alternative is making the primary move when it doesn't
+ * need to.
*
- * @TODO Consider filtering only if weight is -INFINITY
+ * We also consider the primary's stickiness and influence, so the
+ * user has some say in the matter. (This is the configured primary,
+ * not a particular instance of the primary, but that doesn't matter
+ * unless stickiness uses a rule to vary by node, and that seems
+ * acceptable to ignore.)
*/
- crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
- pe__node_name(node), node->weight, factor, score);
- continue;
+ if ((colocation->primary->stickiness >= -score)
+ || !pcmk__colocation_has_influence(colocation, NULL)
+ || !allowed_on_one(colocation->dependent)) {
+ crm_trace("%s: Filtering %d + %f * %d "
+ "(double negative disallowed)",
+ pe__node_name(node), node->weight, factor, score);
+ continue;
+ }
}
if (node->weight == INFINITY_HACK) {
@@ -1358,24 +1591,24 @@ add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
continue;
}
- weight_f = factor * score;
+ delta_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
- weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
+ delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
- if ((weight == 0) && (score != 0)) {
+ if ((delta == 0) && (score != 0)) {
if (factor > 0.0) {
- weight = 1;
+ delta = 1;
} else if (factor < 0.0) {
- weight = -1;
+ delta = -1;
}
}
- new_score = pcmk__add_scores(weight, node->weight);
+ new_score = pcmk__add_scores(delta, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
@@ -1407,52 +1640,69 @@ add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to \c NULL
+ * to copy allowed nodes from \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores
+ * will not be added, and \p *nodes must be \c NULL
+ * as well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
+ * the \c pcmk__coloc_select_this_with flag are used together (and only by
+ * \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
+ * \note This is the shared implementation of
+ * \c pcmk_assignment_methods_t:add_colocated_node_scores().
*/
void
-pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id,
+ GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
GHashTable *work = NULL;
- CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
+ CRM_ASSERT((source_rsc != NULL) && (nodes != NULL)
+ && ((colocation != NULL)
+ || ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
- log_id = rsc->id;
+ log_id = source_rsc->id;
}
// Avoid infinite recursion
- if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
- pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
- log_id, rsc->id);
+ if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
+ pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
+ log_id, source_rsc->id);
return;
}
- pe__set_resource_flags(rsc, pe_rsc_merging);
+ pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
if (*nodes == NULL) {
- /* Only cmp_resources() passes a NULL nodes table, which indicates we
- * should initialize it with the resource's allowed node scores.
- */
- work = pcmk__copy_node_table(rsc->allowed_nodes);
+ work = pcmk__copy_node_table(source_rsc->allowed_nodes);
+ target_rsc = source_rsc;
} else {
- pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
- log_id, rsc->id, factor);
+ const bool pos = pcmk_is_set(flags, pcmk__coloc_select_nonnegative);
+
+ pe_rsc_trace(source_rsc, "%s: Merging %s scores from %s (at %.6f)",
+ log_id, (pos? "positive" : "all"), source_rsc->id, factor);
work = pcmk__copy_node_table(*nodes);
- add_node_scores_matching_attr(work, rsc, attr, factor,
- pcmk_is_set(flags,
- pcmk__coloc_select_nonnegative));
+ add_node_scores_matching_attr(work, source_rsc, target_rsc, colocation,
+ factor, pos);
}
if (work == NULL) {
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
@@ -1460,22 +1710,24 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
GList *colocations = NULL;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
- colocations = pcmk__this_with_colocations(rsc);
- pe_rsc_trace(rsc,
- "Checking additional %d optional '%s with' constraints",
- g_list_length(colocations), rsc->id);
+ colocations = pcmk__this_with_colocations(source_rsc);
+ pe_rsc_trace(source_rsc,
+ "Checking additional %d optional '%s with' "
+ "constraints",
+ g_list_length(colocations), source_rsc->id);
} else {
- colocations = pcmk__with_this_colocations(rsc);
- pe_rsc_trace(rsc,
- "Checking additional %d optional 'with %s' constraints",
- g_list_length(colocations), rsc->id);
+ colocations = pcmk__with_this_colocations(source_rsc);
+ pe_rsc_trace(source_rsc,
+ "Checking additional %d optional 'with %s' "
+ "constraints",
+ g_list_length(colocations), source_rsc->id);
}
flags |= pcmk__coloc_select_active;
for (GList *iter = colocations; iter != NULL; iter = iter->next) {
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
+ pcmk__colocation_t *constraint = iter->data;
- pe_resource_t *other = NULL;
+ pcmk_resource_t *other = NULL;
float other_factor = factor * constraint->score / (float) INFINITY;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
@@ -1486,27 +1738,29 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
other = constraint->dependent;
}
- pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
+ pe_rsc_trace(source_rsc,
+ "Optionally merging score of '%s' constraint "
+ "(%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
- other->cmds->add_colocated_node_scores(other, log_id, &work,
- constraint->node_attribute,
+ other->cmds->add_colocated_node_scores(other, target_rsc, log_id,
+ &work, constraint,
other_factor, flags);
- pe__show_node_weights(true, NULL, log_id, work, rsc->cluster);
+ pe__show_node_scores(true, NULL, log_id, work, source_rsc->cluster);
}
g_list_free(colocations);
} else if (pcmk_is_set(flags, pcmk__coloc_select_active)) {
- pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
- log_id, rsc->id);
+ pe_rsc_info(source_rsc, "%s: Rolling back optional scores from %s",
+ log_id, source_rsc->id);
g_hash_table_destroy(work);
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk_is_set(flags, pcmk__coloc_select_nonnegative)) {
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
@@ -1522,7 +1776,7 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
}
*nodes = work;
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
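Each optional colocation found during the merge contributes at a factor attenuated by its own score relative to infinity (other_factor above), so a chain of weak colocations fades out quickly. A standalone sketch of one merge step, assuming a score ceiling of 1000000 and substituting a plain array for the GHashTable node table (merge_colocated_scores() is an illustrative helper, not a library function):

    #include <stdio.h>

    #define INFINITY_SCORE 1000000  /* assumed stand-in for the library constant */

    /* Hypothetical flattened view: one score per node, indexed 0..n-1 */
    static void
    merge_colocated_scores(int *node_scores, const int *other_scores,
                           int n_nodes, float factor)
    {
        for (int i = 0; i < n_nodes; i++) {
            float delta_f = factor * other_scores[i];
            int delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));

            node_scores[i] += delta;  /* the real code caps at +/-INFINITY */
        }
    }

    int
    main(void)
    {
        int mine[3]   = { 100, 0, -50 };
        int theirs[3] = { 200, 300, 400 };

        /* A colocation scored 500000 contributes at factor 0.5; a further
         * colocation seen while merging "theirs" would contribute at
         * 0.5 * (its score / INFINITY_SCORE), and so on down the chain.
         */
        merge_colocated_scores(mine, theirs, 3, 500000.0f / INFINITY_SCORE);

        for (int i = 0; i < 3; i++) {
            printf("node%d: %d\n", i, mine[i]);  /* 200, 150, 150 */
        }
        return 0;
    }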
/*!
@@ -1535,25 +1789,83 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
void
pcmk__add_dependent_scores(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *target_rsc = user_data;
- pe_resource_t *other = colocation->dependent;
+ pcmk_resource_t *source_rsc = colocation->dependent;
const float factor = colocation->score / (float) INFINITY;
uint32_t flags = pcmk__coloc_select_active;
if (!pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
- if (rsc->variant == pe_clone) {
+ if (target_rsc->variant == pcmk_rsc_variant_clone) {
flags |= pcmk__coloc_select_nonnegative;
}
- pe_rsc_trace(rsc,
+ pe_rsc_trace(target_rsc,
"%s: Incorporating attenuated %s assignment scores due "
- "to colocation %s", rsc->id, other->id, colocation->id);
- other->cmds->add_colocated_node_scores(other, rsc->id, &rsc->allowed_nodes,
- colocation->node_attribute, factor,
- flags);
+ "to colocation %s",
+ target_rsc->id, source_rsc->id, colocation->id);
+ source_rsc->cmds->add_colocated_node_scores(source_rsc, target_rsc,
+ source_rsc->id,
+ &target_rsc->allowed_nodes,
+ colocation, factor, flags);
+}
+
+/*!
+ * \internal
+ * \brief Exclude nodes from a dependent's node table if not in a given list
+ *
+ * Given a dependent resource in a colocation and a list of nodes where the
+ * primary resource will run, set a node's score to \c -INFINITY in the
+ * dependent's node table if not found in the primary nodes list.
+ *
+ * \param[in,out] dependent Dependent resource
+ * \param[in] primary Primary resource (for logging only)
+ * \param[in] colocation Colocation constraint (for logging only)
+ * \param[in] primary_nodes List of nodes where the primary will have
+ * unblocked instances in a suitable role
+ * \param[in] merge_scores If \c true and a node is found in both the
+ * dependent's allowed nodes table and \p primary_nodes,
+ * add the node's score in \p primary_nodes to the node's
+ * score in the dependent's table
+ */
+void
+pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ const GList *primary_nodes, bool merge_scores)
+{
+ GHashTableIter iter;
+ pcmk_node_t *dependent_node = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
+
+ g_hash_table_iter_init(&iter, dependent->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &dependent_node)) {
+ const pcmk_node_t *primary_node = NULL;
+
+ primary_node = pe_find_node_id(primary_nodes,
+ dependent_node->details->id);
+ if (primary_node == NULL) {
+ dependent_node->weight = -INFINITY;
+ pe_rsc_trace(dependent,
+ "Banning %s from %s (no primary instance) for %s",
+ dependent->id, pe__node_name(dependent_node),
+ colocation->id);
+
+ } else if (merge_scores) {
+ dependent_node->weight = pcmk__add_scores(dependent_node->weight,
+ primary_node->weight);
+ pe_rsc_trace(dependent,
+ "Added %s's score %s to %s's score for %s (now %s) "
+ "for colocation %s",
+ primary->id, pcmk_readable_score(primary_node->weight),
+ dependent->id, pe__node_name(dependent_node),
+ pcmk_readable_score(dependent_node->weight),
+ colocation->id);
+ }
+ }
}
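A small table makes the intersection behavior concrete. The sketch below is a simplified stand-in (plain arrays indexed by node position instead of hash tables keyed by node ID, and a local capped_add() in place of pcmk__add_scores()); it bans nodes with no usable primary instance and, when merge_scores is true, adds the primary's score elsewhere:

    #include <stdbool.h>
    #include <stdio.h>

    #define INF 1000000  /* assumed score infinity for illustration */

    static int
    capped_add(int a, int b)
    {
        long sum = (long) a + b;

        if (sum >= INF) {
            return INF;
        }
        if (sum <= -INF) {
            return -INF;
        }
        return (int) sum;
    }

    /* dependent[i] is the dependent's score on node i; primary_present[i]
     * and primary_score[i] describe where the primary has usable instances.
     */
    static void
    intersect_nodes(int *dependent, const bool *primary_present,
                    const int *primary_score, int n_nodes, bool merge_scores)
    {
        for (int i = 0; i < n_nodes; i++) {
            if (!primary_present[i]) {
                dependent[i] = -INF;        /* ban: no primary instance here */
            } else if (merge_scores) {
                dependent[i] = capped_add(dependent[i], primary_score[i]);
            }
        }
    }

    int
    main(void)
    {
        int dependent[3]        = { 100, 50, 25 };
        bool primary_present[3] = { true, false, true };
        int primary_score[3]    = { 10, 0, INF };

        intersect_nodes(dependent, primary_present, primary_score, 3, true);

        for (int i = 0; i < 3; i++) {
            printf("node%d: %d\n", i, dependent[i]); /* 110, -1000000, 1000000 */
        }
        return 0;
    }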
/*!
@@ -1567,7 +1879,7 @@ pcmk__add_dependent_scores(gpointer data, gpointer user_data)
* \note This is a convenience wrapper for the with_this_colocations() method.
*/
GList *
-pcmk__with_this_colocations(const pe_resource_t *rsc)
+pcmk__with_this_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
@@ -1586,7 +1898,7 @@ pcmk__with_this_colocations(const pe_resource_t *rsc)
* \note This is a convenience wrapper for the this_with_colocations() method.
*/
GList *
-pcmk__this_with_colocations(const pe_resource_t *rsc)
+pcmk__this_with_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c
index bae6827..0d1beb9 100644
--- a/lib/pacemaker/pcmk_sched_constraints.c
+++ b/lib/pacemaker/pcmk_sched_constraints.c
@@ -28,16 +28,16 @@
#include "libpacemaker_private.h"
static bool
-evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set)
+evaluate_lifetime(xmlNode *lifetime, pcmk_scheduler_t *scheduler)
{
bool result = FALSE;
crm_time_t *next_change = crm_time_new_undefined();
- result = pe_evaluate_rules(lifetime, NULL, data_set->now, next_change);
+ result = pe_evaluate_rules(lifetime, NULL, scheduler->now, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(recheck, data_set);
+ pe__update_recheck_time(recheck, scheduler, "constraint lifetime");
}
crm_time_free(next_change);
return result;
@@ -47,15 +47,15 @@ evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set)
* \internal
* \brief Unpack constraints from XML
*
- * Given a cluster working set, unpack all constraints from its input XML into
+ * Given scheduler data, unpack all constraints from its input XML into
* data structures.
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__unpack_constraints(pe_working_set_t *data_set)
+pcmk__unpack_constraints(pcmk_scheduler_t *scheduler)
{
- xmlNode *xml_constraints = pcmk_find_cib_element(data_set->input,
+ xmlNode *xml_constraints = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_CONSTRAINTS);
for (xmlNode *xml_obj = pcmk__xe_first_child(xml_constraints);
@@ -63,7 +63,7 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
xmlNode *lifetime = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
- const char *tag = crm_element_name(xml_obj);
+ const char *tag = (const char *) xml_obj->name;
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without "
@@ -81,20 +81,21 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
"constraint object)", id);
}
- if ((lifetime != NULL) && !evaluate_lifetime(lifetime, data_set)) {
+ if ((lifetime != NULL) && !evaluate_lifetime(lifetime, scheduler)) {
crm_info("Constraint %s %s is not active", tag, id);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_ORDER, tag, pcmk__str_casei)) {
- pcmk__unpack_ordering(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_ORDER, tag, pcmk__str_none)) {
+ pcmk__unpack_ordering(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, tag, pcmk__str_casei)) {
- pcmk__unpack_colocation(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, tag, pcmk__str_none)) {
+ pcmk__unpack_colocation(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, tag, pcmk__str_casei)) {
- pcmk__unpack_location(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, tag,
+ pcmk__str_none)) {
+ pcmk__unpack_location(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_TICKET, tag, pcmk__str_casei)) {
- pcmk__unpack_rsc_ticket(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_TICKET, tag, pcmk__str_none)) {
+ pcmk__unpack_rsc_ticket(xml_obj, scheduler);
} else {
pe_err("Unsupported constraint type: %s", tag);
@@ -102,18 +103,19 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
}
}
-pe_resource_t *
+pcmk_resource_t *
pcmk__find_constraint_resource(GList *rsc_list, const char *id)
{
- GList *rIter = NULL;
-
- for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
- pe_resource_t *parent = rIter->data;
- pe_resource_t *match = parent->fns->find_rsc(parent, id, NULL,
- pe_find_renamed);
+ if (id == NULL) {
+ return NULL;
+ }
+ for (GList *iter = rsc_list; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *parent = iter->data;
+ pcmk_resource_t *match = parent->fns->find_rsc(parent, id, NULL,
+ pcmk_rsc_match_history);
if (match != NULL) {
- if(!pcmk__str_eq(match->id, id, pcmk__str_casei)) {
+ if (!pcmk__str_eq(match->id, id, pcmk__str_none)) {
/* We found an instance of a clone instead */
match = uber_parent(match);
crm_debug("Found %s for %s", match->id, id);
@@ -129,21 +131,21 @@ pcmk__find_constraint_resource(GList *rsc_list, const char *id)
* \internal
* \brief Check whether an ID references a resource tag
*
- * \param[in] data_set Cluster working set
- * \param[in] id Tag ID to search for
- * \param[out] tag Where to store tag, if found
+ * \param[in] scheduler Scheduler data
+ * \param[in] id Tag ID to search for
+ * \param[out] tag Where to store tag, if found
*
* \return true if ID refers to a tagged resource or resource set template,
* otherwise false
*/
static bool
-find_constraint_tag(const pe_working_set_t *data_set, const char *id,
- pe_tag_t **tag)
+find_constraint_tag(const pcmk_scheduler_t *scheduler, const char *id,
+ pcmk_tag_t **tag)
{
*tag = NULL;
// Check whether id refers to a resource set template
- if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, (gpointer *) tag)) {
if (*tag == NULL) {
crm_warn("No resource is derived from template '%s'", id);
@@ -153,7 +155,7 @@ find_constraint_tag(const pe_working_set_t *data_set, const char *id,
}
// If not, check whether id refers to a tag
- if (g_hash_table_lookup_extended(data_set->tags, id,
+ if (g_hash_table_lookup_extended(scheduler->tags, id,
NULL, (gpointer *) tag)) {
if (*tag == NULL) {
crm_warn("No resource is tagged with '%s'", id);
@@ -170,27 +172,27 @@ find_constraint_tag(const pe_working_set_t *data_set, const char *id,
 * \internal
 * \brief Check whether an ID refers to a valid resource or tag
*
- * \param[in] data_set Cluster working set
- * \param[in] id ID to search for
- * \param[out] rsc Where to store resource, if found (or NULL to skip
- * searching resources)
- * \param[out] tag Where to store tag, if found (or NULL to skip searching
- * tags)
+ * \param[in] scheduler Scheduler data
+ * \param[in] id ID to search for
+ * \param[out] rsc Where to store resource, if found
+ * (or NULL to skip searching resources)
+ * \param[out] tag Where to store tag, if found
+ * (or NULL to skip searching tags)
*
* \return true if id refers to a resource (possibly indirectly via a tag)
*/
bool
-pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id,
- pe_resource_t **rsc, pe_tag_t **tag)
+pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler, const char *id,
+ pcmk_resource_t **rsc, pcmk_tag_t **tag)
{
if (rsc != NULL) {
- *rsc = pcmk__find_constraint_resource(data_set->resources, id);
+ *rsc = pcmk__find_constraint_resource(scheduler->resources, id);
if (*rsc != NULL) {
return true;
}
}
- if ((tag != NULL) && find_constraint_tag(data_set, id, tag)) {
+ if ((tag != NULL) && find_constraint_tag(scheduler, id, tag)) {
return true;
}
@@ -205,14 +207,14 @@ pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id,
* entries that list tags rather than resource IDs, and replace any found with
* resource_ref entries for the corresponding resource IDs.
*
- * \param[in,out] xml_obj Constraint XML
- * \param[in] data_set Cluster working set
+ * \param[in,out] xml_obj Constraint XML
+ * \param[in] scheduler Scheduler data
*
* \return Equivalent XML with resource tags replaced (or NULL if none)
* \note It is the caller's responsibility to free the result with free_xml().
*/
xmlNode *
-pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
+pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pcmk_scheduler_t *scheduler)
{
xmlNode *new_xml = NULL;
bool any_refs = false;
@@ -228,15 +230,15 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
set != NULL; set = crm_next_same_xml(set)) {
GList *tag_refs = NULL;
- GList *gIter = NULL;
+ GList *iter = NULL;
for (xmlNode *xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
- if (!pcmk__valid_resource_or_tag(data_set, ID(xml_rsc), &rsc,
+ if (!pcmk__valid_resource_or_tag(scheduler, ID(xml_rsc), &rsc,
&tag)) {
pcmk__config_err("Ignoring resource sets for constraint '%s' "
"because '%s' is not a valid resource or tag",
@@ -248,37 +250,36 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
continue;
} else if (tag) {
- /* The resource_ref under the resource_set references a template/tag */
+ // resource_ref under resource_set references template or tag
xmlNode *last_ref = xml_rsc;
- /* A sample:
-
- Original XML:
-
- <resource_set id="tag1-colocation-0" sequential="true">
- <resource_ref id="rsc1"/>
- <resource_ref id="tag1"/>
- <resource_ref id="rsc4"/>
- </resource_set>
-
- Now we are appending rsc2 and rsc3 which are tagged with tag1 right after it:
-
- <resource_set id="tag1-colocation-0" sequential="true">
- <resource_ref id="rsc1"/>
- <resource_ref id="tag1"/>
- <resource_ref id="rsc2"/>
- <resource_ref id="rsc3"/>
- <resource_ref id="rsc4"/>
- </resource_set>
-
+ /* For example, given the original XML:
+ *
+ * <resource_set id="tag1-colocation-0" sequential="true">
+ * <resource_ref id="rsc1"/>
+ * <resource_ref id="tag1"/>
+ * <resource_ref id="rsc4"/>
+ * </resource_set>
+ *
+ * If rsc2 and rsc3 are tagged with tag1, we add them after it:
+ *
+ * <resource_set id="tag1-colocation-0" sequential="true">
+ * <resource_ref id="rsc1"/>
+ * <resource_ref id="tag1"/>
+ * <resource_ref id="rsc2"/>
+ * <resource_ref id="rsc3"/>
+ * <resource_ref id="rsc4"/>
+ * </resource_set>
*/
- for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
- const char *obj_ref = (const char *) gIter->data;
+ for (iter = tag->refs; iter != NULL; iter = iter->next) {
+ const char *obj_ref = iter->data;
xmlNode *new_rsc_ref = NULL;
- new_rsc_ref = xmlNewDocRawNode(getDocPtr(set), NULL,
- (pcmkXmlStr) XML_TAG_RESOURCE_REF, NULL);
+ new_rsc_ref = xmlNewDocRawNode(set->doc, NULL,
+ (pcmkXmlStr)
+ XML_TAG_RESOURCE_REF,
+ NULL);
crm_xml_add(new_rsc_ref, XML_ATTR_ID, obj_ref);
xmlAddNextSibling(last_ref, new_rsc_ref);
@@ -304,8 +305,8 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
</resource_set>
*/
- for (gIter = tag_refs; gIter != NULL; gIter = gIter->next) {
- xmlNode *tag_ref = gIter->data;
+ for (iter = tag_refs; iter != NULL; iter = iter->next) {
+ xmlNode *tag_ref = iter->data;
free_xml(tag_ref);
}
@@ -324,20 +325,21 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
* \brief Convert a tag into a resource set of tagged resources
*
* \param[in,out] xml_obj Constraint XML
- * \param[out] rsc_set Where to store resource set XML created based on tag
- * \param[in] attr Name of XML attribute containing resource or tag ID
- * \param[in] convert_rsc Convert to set even if \p attr references a resource
- * \param[in] data_set Cluster working set
+ * \param[out] rsc_set Where to store resource set XML
+ * \param[in] attr Name of XML attribute with resource or tag ID
+ * \param[in] convert_rsc If true, convert to set even if \p attr
+ * references a resource
+ * \param[in] scheduler Scheduler data
*/
bool
pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
- bool convert_rsc, const pe_working_set_t *data_set)
+ bool convert_rsc, const pcmk_scheduler_t *scheduler)
{
const char *cons_id = NULL;
const char *id = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
*rsc_set = NULL;
@@ -346,7 +348,7 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
cons_id = ID(xml_obj);
if (cons_id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return false;
}
@@ -355,22 +357,21 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
return true;
}
- if (!pcmk__valid_resource_or_tag(data_set, id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", cons_id, id);
return false;
} else if (tag) {
- GList *gIter = NULL;
-
- /* A template/tag is referenced by the "attr" attribute (first, then, rsc or with-rsc).
- Add the template/tag's corresponding "resource_set" which contains the resources derived
- from it or tagged with it under the constraint. */
+ /* The "attr" attribute (for a resource in a constraint) specifies a
+ * template or tag. Add the corresponding resource_set containing the
+ * resources derived from or tagged with it.
+ */
*rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
crm_xml_add(*rsc_set, XML_ATTR_ID, id);
- for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
- const char *obj_ref = (const char *) gIter->data;
+ for (GList *iter = tag->refs; iter != NULL; iter = iter->next) {
+ const char *obj_ref = iter->data;
xmlNode *rsc_ref = NULL;
rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF);
@@ -381,8 +382,10 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
pcmk__xe_set_bool_attr(*rsc_set, "sequential", false);
} else if ((rsc != NULL) && convert_rsc) {
- /* Even a regular resource is referenced by "attr", convert it into a resource_set.
- Because the other side of the constraint could be a template/tag reference. */
+ /* Even if a regular resource is referenced by "attr", convert it into a
+ * resource_set, because the other resource reference in the constraint
+ * could be a template or tag.
+ */
xmlNode *rsc_ref = NULL;
*rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
@@ -407,14 +410,14 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
* \internal
* \brief Create constraints inherent to resource types
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__create_internal_constraints(pe_working_set_t *data_set)
+pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler)
{
crm_trace("Create internal constraints");
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->internal_constraints(rsc);
}
diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c
index c912640..3fe9ebc 100644
--- a/lib/pacemaker/pcmk_sched_fencing.c
+++ b/lib/pacemaker/pcmk_sched_fencing.c
@@ -26,14 +26,15 @@
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
-rsc_is_known_on(const pe_resource_t *rsc, const pe_node_t *node)
+rsc_is_known_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
+ if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
return TRUE;
- } else if ((rsc->variant == pe_native)
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& pe_rsc_is_anon_clone(rsc->parent)
- && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
+ && (g_hash_table_lookup(rsc->parent->known_on,
+ node->details->id) != NULL)) {
/* We check only the parent, not the uber-parent, because we cannot
* assume that the resource is known if it is in an anonymously cloned
* group (which may be only partially known).
@@ -51,29 +52,30 @@ rsc_is_known_on(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in,out] stonith_op Fence action
*/
static void
-order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
+order_start_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
- pe_node_t *target;
- GList *gIter = NULL;
+ pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
- for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
switch (action->needs) {
- case rsc_req_nothing:
+ case pcmk_requires_nothing:
// Anything other than start or promote requires nothing
break;
- case rsc_req_stonith:
- order_actions(stonith_op, action, pe_order_optional);
+ case pcmk_requires_fencing:
+ order_actions(stonith_op, action, pcmk__ar_ordered);
break;
- case rsc_req_quorum:
- if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
+ case pcmk_requires_quorum:
+ if (pcmk__str_eq(action->task, PCMK_ACTION_START,
+ pcmk__str_none)
+ && (g_hash_table_lookup(rsc->allowed_nodes,
+ target->details->id) != NULL)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
@@ -85,10 +87,11 @@ order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* The most likely explanation is that the DC died and took
* its status with it.
*/
- pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
- pe__node_name(target));
+ pe_rsc_debug(rsc, "Ordering %s after %s recovery",
+ action->uuid, pe__node_name(target));
order_actions(stonith_op, action,
- pe_order_optional | pe_order_runnable_left);
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks);
}
break;
}
@@ -103,21 +106,21 @@ order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* \param[in,out] stonith_op Fence action
*/
static void
-order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
+order_stop_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
GList *action_list = NULL;
bool order_implicit = false;
- pe_resource_t *top = uber_parent(rsc);
- pe_action_t *parent_stop = NULL;
- pe_node_t *target;
+ pcmk_resource_t *top = uber_parent(rsc);
+ pcmk_action_t *parent_stop = NULL;
+ pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
- action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
+ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_STOP, FALSE);
/* If resource requires fencing, implicit actions must occur after fencing.
*
@@ -125,25 +128,24 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
|| pe__is_guest_node(target)) {
order_implicit = true;
}
if (action_list && order_implicit) {
- parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
+ parent_stop = find_first_action(top->actions, NULL, PCMK_ACTION_STOP,
+ NULL);
}
- for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = action_list; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
// The stop would never complete, so convert it into a pseudo-action.
- pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_pseudo|pcmk_action_runnable);
if (order_implicit) {
- pe__set_action_flags(action, pe_action_implied_by_stonith);
-
/* Order the stonith before the parent stop (if any).
*
* Also order the stonith before the resource stop, unless the
@@ -152,17 +154,17 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
*
* User constraints must not order a resource in a guest node
* relative to the guest node container resource. The
- * pe_order_preserve flag marks constraints as generated by the
+ * pcmk__ar_guest_allowed flag marks constraints as generated by the
* cluster and thus immune to that check (and is irrelevant if
* target is not a guest).
*/
if (!pe_rsc_is_bundled(rsc)) {
- order_actions(stonith_op, action, pe_order_preserve);
+ order_actions(stonith_op, action, pcmk__ar_guest_allowed);
}
- order_actions(stonith_op, parent_stop, pe_order_preserve);
+ order_actions(stonith_op, parent_stop, pcmk__ar_guest_allowed);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
rsc->id, (order_implicit? "after" : "because"),
pe__node_name(target));
@@ -172,7 +174,7 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
pe__node_name(target));
}
- if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
pe__order_notifs_after_fencing(action, rsc, stonith_op);
}
@@ -198,25 +200,26 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
crm_info("Moving healthy resource %s off %s before fencing",
rsc->id, pe__node_name(node));
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL,
- strdup(CRM_OP_FENCE), stonith_op,
- pe_order_optional, rsc->cluster);
+ strdup(PCMK_ACTION_STONITH), stonith_op,
+ pcmk__ar_ordered, rsc->cluster);
#endif
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
- action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
+ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_DEMOTE, FALSE);
- for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = action_list; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
if (!(action->node->details->online) || action->node->details->unclean
- || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ || pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_info(rsc,
- "Demote of failed resource %s is implicit after %s is fenced",
+ "Demote of failed resource %s is implicit "
+ "after %s is fenced",
rsc->id, pe__node_name(target));
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
@@ -226,13 +229,15 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
- pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(action,
+ pcmk_action_pseudo|pcmk_action_runnable);
if (pe_rsc_is_bundled(rsc)) {
- // Do nothing, let recovery be ordered after parent's implied stop
+ // Recovery will be ordered as usual after parent's implied stop
} else if (order_implicit) {
- order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
+ order_actions(stonith_op, action,
+ pcmk__ar_guest_allowed|pcmk__ar_ordered);
}
}
}
@@ -248,18 +253,16 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* \param[in,out] stonith_op Fencing operation to be ordered against
*/
static void
-rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op)
+rsc_stonith_ordering(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
if (rsc->children) {
- GList *gIter = NULL;
-
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = iter->data;
rsc_stonith_ordering(child_rsc, stonith_op);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping fencing constraints for unmanaged resource: %s",
rsc->id);
@@ -279,14 +282,14 @@ rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op)
* pseudo-actions, etc.
*
* \param[in,out] stonith_op Fencing operation
- * \param[in,out] data_set Working set of cluster
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
+pcmk__order_vs_fence(pcmk_action_t *stonith_op, pcmk_scheduler_t *scheduler)
{
- CRM_CHECK(stonith_op && data_set, return);
- for (GList *r = data_set->resources; r != NULL; r = r->next) {
- rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op);
+ CRM_CHECK(stonith_op && scheduler, return);
+ for (GList *r = scheduler->resources; r != NULL; r = r->next) {
+ rsc_stonith_ordering((pcmk_resource_t *) r->data, stonith_op);
}
}
@@ -300,8 +303,9 @@ pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
* \param[in] order Ordering flags
*/
void
-pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
- pe_action_t *action, enum pe_ordering order)
+pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_action_t *action,
+ enum pcmk__action_relation_flags order)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
@@ -310,16 +314,16 @@ pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
- if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device)
- && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing))
- || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
+ if ((pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
+ && pcmk_is_set(rsc->cluster->flags, pcmk_sched_enable_unfencing))
+ || pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
- pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE,
- node->details->data_set);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, TRUE, NULL,
+ FALSE, node->details->data_set);
order_actions(unfence, action, order);
@@ -342,11 +346,11 @@ pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
* \param[in,out] node Guest node to fence
*/
void
-pcmk__fence_guest(pe_node_t *node)
+pcmk__fence_guest(pcmk_node_t *node)
{
- pe_resource_t *container = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_resource_t *container = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than let it default to
@@ -354,7 +358,7 @@ pcmk__fence_guest(pe_node_t *node)
* are creating a pseudo-event to describe fencing that is already occurring
* by other means (container recovery).
*/
- const char *fence_action = "off";
+ const char *fence_action = PCMK_ACTION_OFF;
CRM_ASSERT(node != NULL);
@@ -363,12 +367,12 @@ pcmk__fence_guest(pe_node_t *node)
*/
container = node->details->remote_rsc->container;
if (container) {
- stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP,
+ stop = find_first_action(container->actions, NULL, PCMK_ACTION_STOP,
NULL);
- if (find_first_action(container->actions, NULL, CRMD_ACTION_START,
+ if (find_first_action(container->actions, NULL, PCMK_ACTION_START,
NULL)) {
- fence_action = "reboot";
+ fence_action = PCMK_ACTION_REBOOT;
}
}
@@ -377,14 +381,14 @@ pcmk__fence_guest(pe_node_t *node)
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean",
FALSE, node->details->data_set);
- pe__set_action_flags(stonith_op, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(stonith_op, pcmk_action_pseudo|pcmk_action_runnable);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
- if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
- pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
+ if ((stop != NULL) && pcmk_is_set(stop->flags, pcmk_action_pseudo)) {
+ pcmk_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
NULL, FALSE,
node->details->data_set);
@@ -392,11 +396,13 @@ pcmk__fence_guest(pe_node_t *node)
pe__node_name(node), stonith_op->id,
pe__node_name(stop->node));
order_actions(parent_stonith_op, stonith_op,
- pe_order_runnable_left|pe_order_implies_then);
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
- pe_order_runnable_left|pe_order_implies_then);
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then);
crm_info("Implying guest %s is down (action %d) "
"after container %s is stopped (action %d)",
pe__node_name(node), stonith_op->id,
@@ -410,10 +416,10 @@ pcmk__fence_guest(pe_node_t *node)
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
- RSC_STOP, NULL);
+ PCMK_ACTION_STOP, NULL);
if (stop) {
- order_actions(stop, stonith_op, pe_order_optional);
+ order_actions(stop, stonith_op, pcmk__ar_ordered);
crm_info("Implying guest %s is down (action %d) "
"after connection is stopped (action %d)",
pe__node_name(node), stonith_op->id, stop->id);
@@ -440,7 +446,7 @@ pcmk__fence_guest(pe_node_t *node)
* otherwise false
*/
bool
-pcmk__node_unfenced(const pe_node_t *node)
+pcmk__node_unfenced(const pcmk_node_t *node)
{
const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
@@ -457,11 +463,11 @@ pcmk__node_unfenced(const pe_node_t *node)
void
pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data)
{
- pe_node_t *node = (pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ pcmk_node_t *node = (pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
- pe_action_t *unfence = pe_fence_op(node, "on", true, NULL, false,
- rsc->cluster);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, true, NULL,
+ false, rsc->cluster);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
@@ -483,11 +489,12 @@ pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data)
*/
pcmk__new_ordering(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
- pe_order_optional|pe_order_same_node,
+ pcmk__ar_ordered|pcmk__ar_if_on_same_node,
rsc->cluster);
pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
- pe_order_implies_then_on_node|pe_order_same_node,
+ pcmk__ar_first_implies_same_node_then
+ |pcmk__ar_if_on_same_node,
rsc->cluster);
}
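The mechanical change running through this file is the switch from the pe_order_* ordering flags to the pcmk__ar_* action-relation flags, which are still combined with bitwise OR and queried with pcmk_is_set(). A standalone sketch of that bit-flag idiom, using invented flag names and a local is_set() helper rather than the real enum and macro:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical relation flags, one bit per ordering behavior */
    enum relation_flags {
        rel_none              = 0,
        rel_ordered           = (1 << 0),
        rel_unrunnable_blocks = (1 << 1),
        rel_same_node_only    = (1 << 2),
    };

    static bool
    is_set(uint32_t flags, uint32_t flag)
    {
        return (flags & flag) == flag;
    }

    int
    main(void)
    {
        uint32_t flags = rel_ordered | rel_unrunnable_blocks;

        printf("ordered: %s\n", is_set(flags, rel_ordered)? "yes" : "no");
        printf("same-node only: %s\n",
               is_set(flags, rel_same_node_only)? "yes" : "no");
        return 0;
    }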
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index cb139f7..9983c1f 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -20,23 +20,33 @@
* \internal
* \brief Assign a group resource to a node
*
- * \param[in,out] rsc Group resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Group resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a child of \p rsc can't be
+ * assigned to a node, set the child's next role to
+ * stopped and update existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
- pe_node_t *first_assigned_node = NULL;
- pe_resource_t *first_member = NULL;
+ pcmk_node_t *first_assigned_node = NULL;
+ pcmk_resource_t *first_member = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return rsc->allocated_to; // Assignment already done
}
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Assignment dependency loop detected involving %s",
rsc->id);
return NULL;
@@ -44,33 +54,34 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if (rsc->children == NULL) {
// No members to assign
- pe__clear_resource_flags(rsc, pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
- first_member = (pe_resource_t *) rsc->children->data;
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
+ first_member = (pcmk_resource_t *) rsc->children->data;
rsc->role = first_member->role;
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
- pe_node_t *node = NULL;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
+ pcmk_node_t *node = NULL;
pe_rsc_trace(rsc, "Assigning group %s member %s",
rsc->id, member->id);
- node = member->cmds->assign(member, prefer);
+ node = member->cmds->assign(member, prefer, stop_if_fail);
if (first_assigned_node == NULL) {
first_assigned_node = node;
}
}
pe__set_next_role(rsc, first_member->next_role, "first group member");
- pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
- if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
return NULL;
}
return first_assigned_node;
@@ -85,12 +96,12 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
*
* \return Newly created pseudo-operation
*/
-static pe_action_t *
-create_group_pseudo_op(pe_resource_t *group, const char *action)
+static pcmk_action_t *
+create_group_pseudo_op(pcmk_resource_t *group, const char *action)
{
- pe_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
- action, NULL, TRUE, TRUE, group->cluster);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ pcmk_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
+ action, NULL, TRUE, group->cluster);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
return op;
}
@@ -101,29 +112,29 @@ create_group_pseudo_op(pe_resource_t *group, const char *action)
* \param[in,out] rsc Group resource to create actions for
*/
void
-pcmk__group_create_actions(pe_resource_t *rsc)
+pcmk__group_create_actions(pcmk_resource_t *rsc)
{
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
pe_rsc_trace(rsc, "Creating actions for group %s", rsc->id);
// Create actions for individual group members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->create_actions(member);
}
// Create pseudo-actions for group itself to serve as ordering points
- create_group_pseudo_op(rsc, RSC_START);
- create_group_pseudo_op(rsc, RSC_STARTED);
- create_group_pseudo_op(rsc, RSC_STOP);
- create_group_pseudo_op(rsc, RSC_STOPPED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_START);
+ create_group_pseudo_op(rsc, PCMK_ACTION_RUNNING);
+ create_group_pseudo_op(rsc, PCMK_ACTION_STOP);
+ create_group_pseudo_op(rsc, PCMK_ACTION_STOPPED);
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE))) {
- create_group_pseudo_op(rsc, RSC_DEMOTE);
- create_group_pseudo_op(rsc, RSC_DEMOTED);
- create_group_pseudo_op(rsc, RSC_PROMOTE);
- create_group_pseudo_op(rsc, RSC_PROMOTED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTE);
+ create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTE);
+ create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTED);
}
}
@@ -134,8 +145,8 @@ struct member_data {
bool colocated;
bool promotable;
- pe_resource_t *last_active;
- pe_resource_t *previous_member;
+ pcmk_resource_t *last_active;
+ pcmk_resource_t *previous_member;
};
/*!
@@ -148,14 +159,14 @@ struct member_data {
static void
member_internal_constraints(gpointer data, gpointer user_data)
{
- pe_resource_t *member = (pe_resource_t *) data;
+ pcmk_resource_t *member = (pcmk_resource_t *) data;
struct member_data *member_data = (struct member_data *) user_data;
// For ordering demote vs demote or stop vs stop
- uint32_t down_flags = pe_order_implies_first_printed;
+ uint32_t down_flags = pcmk__ar_then_implies_first_graphed;
// For ordering demote vs demoted or stop vs stopped
- uint32_t post_down_flags = pe_order_implies_then_printed;
+ uint32_t post_down_flags = pcmk__ar_first_implies_then_graphed;
// Create the individual member's implicit constraints
member->cmds->internal_constraints(member);
@@ -163,76 +174,85 @@ member_internal_constraints(gpointer data, gpointer user_data)
if (member_data->previous_member == NULL) {
// This is first member
if (member_data->ordered) {
- pe__set_order_flags(down_flags, pe_order_optional);
- post_down_flags = pe_order_implies_then;
+ pe__set_order_flags(down_flags, pcmk__ar_ordered);
+ post_down_flags = pcmk__ar_first_implies_then;
}
} else if (member_data->colocated) {
+ uint32_t flags = pcmk__coloc_none;
+
+ if (pcmk_is_set(member->flags, pcmk_rsc_critical)) {
+ flags |= pcmk__coloc_influence;
+ }
+
// Colocate this member with the previous one
- pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
- member, member_data->previous_member, NULL, NULL,
- pcmk_is_set(member->flags, pe_rsc_critical),
- member->cluster);
+ pcmk__new_colocation("#group-members", NULL, INFINITY, member,
+ member_data->previous_member, NULL, NULL, flags);
}
if (member_data->promotable) {
// Demote group -> demote member -> group is demoted
- pcmk__order_resource_actions(member->parent, RSC_DEMOTE,
- member, RSC_DEMOTE, down_flags);
- pcmk__order_resource_actions(member, RSC_DEMOTE,
- member->parent, RSC_DEMOTED,
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_DEMOTE,
+ member, PCMK_ACTION_DEMOTE, down_flags);
+ pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
+ member->parent, PCMK_ACTION_DEMOTED,
post_down_flags);
// Promote group -> promote member -> group is promoted
- pcmk__order_resource_actions(member, RSC_PROMOTE,
- member->parent, RSC_PROMOTED,
- pe_order_runnable_left
- |pe_order_implies_then
- |pe_order_implies_then_printed);
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE,
- member, RSC_PROMOTE,
- pe_order_implies_first_printed);
+ pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
+ member->parent, PCMK_ACTION_PROMOTED,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_first_implies_then_graphed);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_then_implies_first_graphed);
}
// Stop group -> stop member -> group is stopped
pcmk__order_stops(member->parent, member, down_flags);
- pcmk__order_resource_actions(member, RSC_STOP, member->parent, RSC_STOPPED,
+ pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
+ member->parent, PCMK_ACTION_STOPPED,
post_down_flags);
// Start group -> start member -> group is started
- pcmk__order_starts(member->parent, member, pe_order_implies_first_printed);
- pcmk__order_resource_actions(member, RSC_START, member->parent, RSC_STARTED,
- pe_order_runnable_left
- |pe_order_implies_then
- |pe_order_implies_then_printed);
+ pcmk__order_starts(member->parent, member,
+ pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(member, PCMK_ACTION_START,
+ member->parent, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_first_implies_then_graphed);
if (!member_data->ordered) {
pcmk__order_starts(member->parent, member,
- pe_order_implies_then
- |pe_order_runnable_left
- |pe_order_implies_first_printed);
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member,
- RSC_PROMOTE,
- pe_order_implies_then
- |pe_order_runnable_left
- |pe_order_implies_first_printed);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
}
} else if (member_data->previous_member == NULL) {
- pcmk__order_starts(member->parent, member, pe_order_none);
+ pcmk__order_starts(member->parent, member, pcmk__ar_none);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member,
- RSC_PROMOTE, pe_order_none);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_none);
}
} else {
// Order this member relative to the previous one
pcmk__order_starts(member_data->previous_member, member,
- pe_order_implies_then|pe_order_runnable_left);
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks);
pcmk__order_stops(member, member_data->previous_member,
- pe_order_optional|pe_order_restart);
+ pcmk__ar_ordered|pcmk__ar_intermediate_stop);
/* In unusual circumstances (such as adding a new member to the middle
* of a group with unmanaged later members), this member may be active
@@ -242,20 +262,22 @@ member_internal_constraints(gpointer data, gpointer user_data)
*/
if ((member->running_on != NULL)
&& (member_data->previous_member->running_on == NULL)) {
- pcmk__order_resource_actions(member, RSC_STOP,
- member_data->previous_member, RSC_START,
- pe_order_implies_first
- |pe_order_runnable_left);
+ pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
+ member_data->previous_member,
+ PCMK_ACTION_START,
+ pcmk__ar_then_implies_first
+ |pcmk__ar_unrunnable_first_blocks);
}
if (member_data->promotable) {
pcmk__order_resource_actions(member_data->previous_member,
- RSC_PROMOTE, member, RSC_PROMOTE,
- pe_order_implies_then
- |pe_order_runnable_left);
- pcmk__order_resource_actions(member, RSC_DEMOTE,
+ PCMK_ACTION_PROMOTE, member,
+ PCMK_ACTION_PROMOTE,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
member_data->previous_member,
- RSC_DEMOTE, pe_order_optional);
+ PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
}
}
@@ -265,7 +287,8 @@ member_internal_constraints(gpointer data, gpointer user_data)
&& (member_data->previous_member->running_on == NULL)
&& (member_data->last_active != NULL)
&& (member_data->last_active->running_on != NULL)) {
- pcmk__order_stops(member, member_data->last_active, pe_order_optional);
+ pcmk__order_stops(member, member_data->last_active,
+ pcmk__ar_ordered);
}
member_data->last_active = member;
}
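
The hunks above replace the old pe_order_* constants with the new pcmk__ar_* action-relation flags, which are built up with bitwise OR and adjusted per member (for example, adding pcmk__ar_ordered for the first member of an ordered group). A minimal standalone sketch of that flag-set pattern follows; the enum values and helper below are invented for illustration and are not Pacemaker's API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for action-relation flags */
    enum demo_ar_flags {
        demo_ar_none               = 0,
        demo_ar_ordered            = (1 << 0),
        demo_ar_first_implies_then = (1 << 1),
        demo_ar_graphed            = (1 << 2),
    };

    /* Same idea as pcmk_is_set(): true if all bits in "bits" are set in "flags" */
    static bool
    demo_all_set(uint32_t flags, uint32_t bits)
    {
        return (flags & bits) == bits;
    }

    int
    main(void)
    {
        uint32_t down_flags = demo_ar_graphed;
        bool ordered_group = true;

        if (ordered_group) {
            down_flags |= demo_ar_ordered;   // first member of an ordered group
        }
        printf("ordered bit set: %s\n",
               demo_all_set(down_flags, demo_ar_ordered)? "yes" : "no");
        return 0;
    }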
@@ -280,35 +303,40 @@ member_internal_constraints(gpointer data, gpointer user_data)
* \param[in,out] rsc Group resource to create implicit constraints for
*/
void
-pcmk__group_internal_constraints(pe_resource_t *rsc)
+pcmk__group_internal_constraints(pcmk_resource_t *rsc)
{
struct member_data member_data = { false, };
+ const pcmk_resource_t *top = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
/* Order group pseudo-actions relative to each other for restarting:
* stop group -> group is stopped -> start group -> group is started
*/
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_runnable_left);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_runnable_left);
-
- member_data.ordered = pe__group_flag_is_set(rsc, pe__group_ordered);
- member_data.colocated = pe__group_flag_is_set(rsc, pe__group_colocated);
- member_data.promotable = pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks);
+
+ top = pe__const_top_resource(rsc, false);
+
+ member_data.ordered = pe__group_flag_is_set(rsc, pcmk__group_ordered);
+ member_data.colocated = pe__group_flag_is_set(rsc, pcmk__group_colocated);
+ member_data.promotable = pcmk_is_set(top->flags, pcmk_rsc_promotable);
g_list_foreach(rsc->children, member_internal_constraints, &member_data);
}
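
pcmk__group_internal_constraints() walks the members with g_list_foreach(), passing a small struct as user_data so the callback can carry state (previous member, group flags) from one iteration to the next. A self-contained GLib sketch of that pattern, with invented names:

    /* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    struct walk_data {
        gboolean colocated;
        const char *previous;   // name of the previously visited member
    };

    static void
    visit_member(gpointer data, gpointer user_data)
    {
        const char *member = data;
        struct walk_data *wd = user_data;

        if ((wd->previous != NULL) && wd->colocated) {
            printf("colocate %s with %s\n", member, wd->previous);
        }
        wd->previous = member;  // remember this member for the next iteration
    }

    int
    main(void)
    {
        GList *members = NULL;
        struct walk_data wd = { TRUE, NULL };

        members = g_list_append(members, "A");
        members = g_list_append(members, "B");
        members = g_list_append(members, "C");

        g_list_foreach(members, visit_member, &wd);
        g_list_free(members);
        return 0;
    }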
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for a group with some other resource, apply the
- * score to the dependent's allowed node weights (if we are still placing
+ * score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent group resource in colocation
@@ -316,10 +344,10 @@ pcmk__group_internal_constraints(pe_resource_t *rsc)
* \param[in] colocation Colocation constraint to apply
*/
static void
-colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
+colocate_group_with(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
if (dependent->children == NULL) {
return;
@@ -328,9 +356,9 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
pe_rsc_trace(primary, "Processing %s (group %s with %s) for dependent",
colocation->id, dependent->id, primary->id);
- if (pe__group_flag_is_set(dependent, pe__group_colocated)) {
+ if (pe__group_flag_is_set(dependent, pcmk__group_colocated)) {
// Colocate first member (internal colocations will handle the rest)
- member = (pe_resource_t *) dependent->children->data;
+ member = (pcmk_resource_t *) dependent->children->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
return;
}
@@ -344,17 +372,17 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
// Colocate each member individually
for (GList *iter = dependent->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ member = (pcmk_resource_t *) iter->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
}
}
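
colocate_group_with() applies the colocation score only to the first member when the group is colocated (the internal member-to-member colocations created above chain the effect to the rest), and to every member individually otherwise. A plain-C sketch of that branch, using invented types rather than the scheduler's:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_member { const char *id; };

    static void
    apply_score(const struct demo_member *member, int score)
    {
        printf("apply %d to %s\n", score, member->id);
    }

    static void
    colocate_group(const struct demo_member *members, int n_members,
                   bool colocated, int score)
    {
        if (n_members == 0) {
            return;
        }
        if (colocated) {
            apply_score(&members[0], score);    // internal colocations chain the rest
            return;
        }
        for (int i = 0; i < n_members; i++) {   // no chaining: hit every member
            apply_score(&members[i], score);
        }
    }

    int
    main(void)
    {
        struct demo_member members[] = { { "A" }, { "B" }, { "C" } };

        colocate_group(members, 3, true, 100);
        colocate_group(members, 3, false, 100);
        return 0;
    }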
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for some other resource with a group, apply the
- * score to the dependent's allowed node weights (if we are still placing
+ * score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -362,20 +390,20 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
* \param[in] colocation Colocation constraint to apply
*/
static void
-colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
+colocate_with_group(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *member = NULL;
+ const pcmk_resource_t *member = NULL;
pe_rsc_trace(primary,
"Processing colocation %s (%s with group %s) for primary",
colocation->id, dependent->id, primary->id);
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
return;
}
- if (pe__group_flag_is_set(primary, pe__group_colocated)) {
+ if (pe__group_flag_is_set(primary, pcmk__group_colocated)) {
if (colocation->score >= INFINITY) {
/* For mandatory colocations, the entire group must be assignable
@@ -388,7 +416,7 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
* up doesn't matter, so apply the colocation based on the first
* member.
*/
- member = (pe_resource_t *) primary->children->data;
+ member = (pcmk_resource_t *) primary->children->data;
}
if (member == NULL) {
return; // Nothing to colocate with
@@ -406,18 +434,19 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
}
// Colocate dependent with each member individually
- for (GList *iter = primary->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ for (const GList *iter = primary->children; iter != NULL;
+ iter = iter->next) {
+ member = iter->data;
member->cmds->apply_coloc_score(dependent, member, colocation, false);
}
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -426,8 +455,8 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__group_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
@@ -439,7 +468,7 @@ pcmk__group_apply_coloc_score(pe_resource_t *dependent,
} else {
// Method should only be called for primitive dependents
- CRM_ASSERT(dependent->variant == pe_native);
+ CRM_ASSERT(dependent->variant == pcmk_rsc_variant_primitive);
colocate_with_group(dependent, primary, colocation);
}
@@ -454,62 +483,61 @@ pcmk__group_apply_coloc_score(pe_resource_t *dependent,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
+uint32_t
+pcmk__group_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
// Default flags for a group action
- enum pe_action_flags flags = pe_action_optional
- |pe_action_runnable
- |pe_action_pseudo;
+ uint32_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
CRM_ASSERT(action != NULL);
// Update flags considering each member's own flags for same action
for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
// Check whether member has the same action
enum action_tasks task = get_complex_task(member, action->task);
const char *task_s = task2text(task);
- pe_action_t *member_action = find_first_action(member->actions, NULL,
- task_s, node);
+ pcmk_action_t *member_action = find_first_action(member->actions, NULL,
+ task_s, node);
if (member_action != NULL) {
- enum pe_action_flags member_flags;
-
- member_flags = member->cmds->action_flags(member_action, node);
+ uint32_t member_flags = member->cmds->action_flags(member_action,
+ node);
// Group action is mandatory if any member action is
- if (pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(member_flags, pe_action_optional)) {
+ if (pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(member_flags, pcmk_action_optional)) {
pe_rsc_trace(action->rsc, "%s is mandatory because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_optional);
- pe__clear_action_flags(action, pe_action_optional);
+ pcmk_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
// Group action is unrunnable if any member action is
if (!pcmk__str_eq(task_s, action->task, pcmk__str_none)
- && pcmk_is_set(flags, pe_action_runnable)
- && !pcmk_is_set(member_flags, pe_action_runnable)) {
+ && pcmk_is_set(flags, pcmk_action_runnable)
+ && !pcmk_is_set(member_flags, pcmk_action_runnable)) {
pe_rsc_trace(action->rsc, "%s is unrunnable because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_runnable);
- pe__clear_action_flags(action, pe_action_runnable);
+ pcmk_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
/* Group (pseudo-)actions other than stop or demote are unrunnable
* unless every member will do it.
*/
- } else if ((task != stop_rsc) && (task != action_demote)) {
+ } else if ((task != pcmk_action_stop) && (task != pcmk_action_demote)) {
pe_rsc_trace(action->rsc,
"%s is not runnable because %s will not %s",
action->uuid, member->id, task_s);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_runnable);
+ pcmk_action_runnable);
}
}
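
pcmk__group_action_flags() starts from optimistic defaults (optional, runnable, pseudo) and clears bits as members disagree: the group action becomes mandatory if any member's action is, and unrunnable if any member's action is. A simplified standalone sketch of that fold over member flags (the real code adds further conditions, and these flag names are invented here):

    #include <stdint.h>
    #include <stdio.h>

    enum demo_action_flags {
        demo_action_optional = (1 << 0),
        demo_action_runnable = (1 << 1),
        demo_action_pseudo   = (1 << 2),
    };

    static uint32_t
    group_flags(const uint32_t *member_flags, int n_members)
    {
        uint32_t flags = demo_action_optional|demo_action_runnable|demo_action_pseudo;

        for (int i = 0; i < n_members; i++) {
            if ((member_flags[i] & demo_action_optional) == 0) {
                flags &= ~(uint32_t) demo_action_optional;  // any mandatory member
            }
            if ((member_flags[i] & demo_action_runnable) == 0) {
                flags &= ~(uint32_t) demo_action_runnable;  // any unrunnable member
            }
        }
        return flags;
    }

    int
    main(void)
    {
        uint32_t members[] = {
            demo_action_optional|demo_action_runnable,
            demo_action_runnable,                       // mandatory member
        };
        uint32_t flags = group_flags(members, 2);

        printf("group optional: %s, runnable: %s\n",
               (flags & demo_action_optional)? "yes" : "no",
               (flags & demo_action_runnable)? "yes" : "no");
        return 0;
    }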
@@ -524,49 +552,48 @@ pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
* (and runnable_before members if appropriate) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__group_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
- CRM_ASSERT((first != NULL) && (then != NULL) && (data_set != NULL));
-
- // Group method can be called only for group action as "then" action
- CRM_ASSERT(then->rsc != NULL);
+ // Group method can be called only on behalf of "then" action
+ CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL)
+ && (scheduler != NULL));
// Update the actions for the group itself
changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
- type, data_set);
+ type, scheduler);
// Update the actions for each group member
for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
- pe_action_t *member_action = find_first_action(member->actions, NULL,
- then->task, node);
+ pcmk_action_t *member_action = find_first_action(member->actions, NULL,
+ then->task, node);
if (member_action != NULL) {
changed |= member->cmds->update_ordered_actions(first,
member_action, node,
flags, filter, type,
- data_set);
+ scheduler);
}
}
return changed;
@@ -580,24 +607,25 @@ pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
* \param[in,out] location Location constraint to apply
*/
void
-pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
+pcmk__group_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
GList *node_list_orig = NULL;
GList *node_list_copy = NULL;
bool reset_scores = true;
- CRM_ASSERT((rsc != NULL) && (location != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (location != NULL));
node_list_orig = location->node_list_rh;
node_list_copy = pcmk__copy_node_list(node_list_orig, true);
- reset_scores = pe__group_flag_is_set(rsc, pe__group_colocated);
+ reset_scores = pe__group_flag_is_set(rsc, pcmk__group_colocated);
// Apply the constraint for the group itself (updates node scores)
pcmk__apply_location(rsc, location);
// Apply the constraint for each member
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->apply_location(member, location);
@@ -615,21 +643,21 @@ pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
g_list_free_full(node_list_copy, free);
}
-// Group implementation of resource_alloc_functions_t:colocated_resources()
+// Group implementation of pcmk_assignment_methods_t:colocated_resources()
GList *
-pcmk__group_colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
- const pe_resource_t *member = NULL;
+ const pcmk_resource_t *member = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
- if (pe__group_flag_is_set(rsc, pe__group_colocated)
+ if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
/* This group has colocated members and/or is cloned -- either way,
* add every child's colocated resources to the list. The first and last
@@ -639,7 +667,7 @@ pcmk__group_colocated_resources(const pe_resource_t *rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- member = (const pe_resource_t *) iter->data;
+ member = (const pcmk_resource_t *) iter->data;
colocated_rscs = member->cmds->colocated_resources(member, orig_rsc,
colocated_rscs);
}
@@ -648,21 +676,21 @@ pcmk__group_colocated_resources(const pe_resource_t *rsc,
/* This group's members are not colocated, and the group is not cloned,
* so just add the group's own colocations to the list.
*/
- colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs);
+ colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc,
+ colocated_rscs);
}
return colocated_rscs;
}
-// Group implementation of resource_alloc_functions_t:with_this_colocations()
+// Group implementation of pcmk_assignment_methods_t:with_this_colocations()
void
-pcmk__with_group_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_group_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_group)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
@@ -670,46 +698,85 @@ pcmk__with_group_colocations(const pe_resource_t *rsc,
}
/* "With this" colocations are needed only for the group itself and for its
- * last member. Add the group's colocations plus any relevant
- * parent colocations if cloned.
+ * last member. (Previous members will chain via the group internal
+ * colocations.)
*/
- if ((rsc == orig_rsc) || (orig_rsc == pe__last_group_member(rsc))) {
- crm_trace("Adding 'with %s' colocations to list for %s",
- rsc->id, orig_rsc->id);
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- if (rsc->parent != NULL) { // Cloned group
- rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
- list);
+ if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) {
+ return;
+ }
+
+ pe_rsc_trace(rsc, "Adding 'with %s' colocations to list for %s",
+ rsc->id, orig_rsc->id);
+
+ // Add the group's own colocations
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+
+ // If cloned, add any relevant colocations with the clone
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
+ list);
+ }
+
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
+ // @COMPAT Non-colocated groups are deprecated
+ return;
+ }
+
+ // Add explicit colocations with the group's (other) children
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *member = iter->data;
+
+ if (member != orig_rsc) {
+ member->cmds->with_this_colocations(member, orig_rsc, list);
}
}
}
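
The early return above means only the group itself and its last member collect "with this" colocations; earlier members are reached through the internal member-to-member colocations. A tiny GLib sketch of a "last member" lookup in the spirit of pe__last_group_member() (the helper name here is invented):

    #include <glib.h>
    #include <stdio.h>

    /* Return the data of the last list element, or NULL for an empty list */
    static gpointer
    demo_last_member(GList *children)
    {
        GList *last = g_list_last(children);

        return (last != NULL)? last->data : NULL;
    }

    int
    main(void)
    {
        GList *children = NULL;

        children = g_list_append(children, "A");
        children = g_list_append(children, "B");

        printf("last member: %s\n", (char *) demo_last_member(children));
        g_list_free(children);
        return 0;
    }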
-// Group implementation of resource_alloc_functions_t:this_with_colocations()
+// Group implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__group_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__group_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_group)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ const pcmk_resource_t *member = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
- /* Colocations for the group itself, or for its first member, consist of the
- * group's colocations plus any relevant parent colocations if cloned.
+ /* "This with" colocations are normally needed only for the group itself and
+ * for its first member.
*/
if ((rsc == orig_rsc)
- || (orig_rsc == (const pe_resource_t *) rsc->children->data)) {
- crm_trace("Adding '%s with' colocations to list for %s",
- rsc->id, orig_rsc->id);
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- if (rsc->parent != NULL) { // Cloned group
+ || (orig_rsc == (const pcmk_resource_t *) rsc->children->data)) {
+ pe_rsc_trace(rsc, "Adding '%s with' colocations to list for %s",
+ rsc->id, orig_rsc->id);
+
+ // Add the group's own colocations
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+
+ // If cloned, add any relevant colocations involving the clone
+ if (rsc->parent != NULL) {
rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc,
list);
}
+
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
+ // @COMPAT Non-colocated groups are deprecated
+ return;
+ }
+
+ // Add explicit colocations involving the group's (other) children
+ for (const GList *iter = rsc->children;
+ iter != NULL; iter = iter->next) {
+ member = iter->data;
+ if (member != orig_rsc) {
+ member->cmds->this_with_colocations(member, orig_rsc, list);
+ }
+ }
return;
}
@@ -718,14 +785,13 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
* However, if an earlier group member is unmanaged, this chaining will not
* happen, so the group's mandatory colocations must be explicitly added.
*/
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *member = (const pe_resource_t *) iter->data;
-
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ member = iter->data;
if (orig_rsc == member) {
break; // We've seen all earlier members, and none are unmanaged
}
- if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(member->flags, pcmk_rsc_managed)) {
crm_trace("Adding mandatory '%s with' colocations to list for "
"member %s because earlier member %s is unmanaged",
rsc->id, orig_rsc->id, member->id);
@@ -735,7 +801,7 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
colocation = (const pcmk__colocation_t *) cons_iter->data;
if (colocation->score == INFINITY) {
- pcmk__add_this_with(list, colocation);
+ pcmk__add_this_with(list, colocation, orig_rsc);
}
}
// @TODO Add mandatory (or all?) clone constraints if cloned
@@ -752,38 +818,57 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Group resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to \c NULL
+ * to copy allowed nodes from \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores will
+ * not be added, and \p *nodes must be \c NULL as
+ * well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
+ * the \c pcmk__coloc_select_this_with flag are used together (and only by
+ * \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
+ * \note This is the group implementation of
+ * \c pcmk_assignment_methods_t:add_colocated_node_scores().
*/
void
-pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
- CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
+ CRM_ASSERT((source_rsc != NULL)
+ && (source_rsc->variant == pcmk_rsc_variant_group)
+ && (nodes != NULL)
+ && ((colocation != NULL)
+ || ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
- log_id = rsc->id;
+ log_id = source_rsc->id;
}
// Avoid infinite recursion
- if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
- pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
- log_id, rsc->id);
+ if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
+ pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
+ log_id, source_rsc->id);
return;
}
- pe__set_resource_flags(rsc, pe_rsc_merging);
+ pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
// Ignore empty groups (only possible with schema validation disabled)
- if (rsc->children == NULL) {
+ if (source_rsc->children == NULL) {
return;
}
@@ -798,40 +883,41 @@ pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
* For "with this" colocations, the first member works similarly.
*/
if (*nodes == NULL) {
- member = pe__last_group_member(rsc);
+ member = pe__last_group_member(source_rsc);
} else {
- member = rsc->children->data;
+ member = source_rsc->children->data;
}
- pe_rsc_trace(rsc, "%s: Merging scores from group %s using member %s "
- "(at %.6f)", log_id, rsc->id, member->id, factor);
- member->cmds->add_colocated_node_scores(member, log_id, nodes, attr, factor,
- flags);
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe_rsc_trace(source_rsc, "%s: Merging scores from group %s using member %s "
+ "(at %.6f)", log_id, source_rsc->id, member->id, factor);
+ member->cmds->add_colocated_node_scores(member, target_rsc, log_id, nodes,
+ colocation, factor, flags);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
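
The pcmk_rsc_updating_nodes flag above acts as a plain recursion guard: set it before delegating to a member whose colocations may lead back to the group, bail out if it is already set, and clear it afterwards. A minimal standalone sketch of that guard, with invented names and a deliberately circular dependency:

    #include <stdint.h>
    #include <stdio.h>

    enum demo_rsc_flags { demo_rsc_updating_nodes = (1 << 0) };

    struct demo_rsc {
        const char *id;
        uint32_t flags;
        struct demo_rsc *colocated_with;    // may point back to this resource
    };

    static void
    add_scores(struct demo_rsc *rsc)
    {
        if (rsc->flags & demo_rsc_updating_nodes) {
            printf("%s: breaking dependency loop\n", rsc->id);
            return;
        }
        rsc->flags |= demo_rsc_updating_nodes;

        if (rsc->colocated_with != NULL) {
            add_scores(rsc->colocated_with);    // may recurse back into rsc safely
        }
        rsc->flags &= ~(uint32_t) demo_rsc_updating_nodes;
    }

    int
    main(void)
    {
        struct demo_rsc a = { "group-A", 0, NULL };
        struct demo_rsc b = { "member-B", 0, &a };

        a.colocated_with = &b;      // create a dependency loop on purpose
        add_scores(&a);
        return 0;
    }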
-// Group implementation of resource_alloc_functions_t:add_utilization()
+// Group implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__group_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__group_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
- CRM_ASSERT((rsc != NULL) && (orig_rsc != NULL) && (utilization != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (utilization != NULL));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
orig_rsc->id, rsc->id);
- if (pe__group_flag_is_set(rsc, pe__group_colocated)
+ if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
// Every group member will be on same node, so sum all members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ member = (pcmk_resource_t *) iter->data;
- if (pcmk_is_set(member->flags, pe_rsc_provisional)
+ if (pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
utilization);
@@ -840,9 +926,9 @@ pcmk__group_add_utilization(const pe_resource_t *rsc,
} else if (rsc->children != NULL) {
// Just add first member's utilization
- member = (pe_resource_t *) rsc->children->data;
+ member = (pcmk_resource_t *) rsc->children->data;
if ((member != NULL)
- && pcmk_is_set(member->flags, pe_rsc_provisional)
+ && pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
@@ -851,14 +937,13 @@ pcmk__group_add_utilization(const pe_resource_t *rsc,
}
}
-// Group implementation of resource_alloc_functions_t:shutdown_lock()
void
-pcmk__group_shutdown_lock(pe_resource_t *rsc)
+pcmk__group_shutdown_lock(pcmk_resource_t *rsc)
{
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->shutdown_lock(member);
}
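
Throughout these files the scheduler never calls variant-specific code directly; it dispatches through each resource's cmds method table, as in member->cmds->shutdown_lock(member) above. A self-contained sketch of that vtable pattern with invented types (not the real pcmk_assignment_methods_t layout):

    #include <stdio.h>

    struct demo_rsc;

    /* A miniature "assignment methods" table: one callback per operation */
    struct demo_methods {
        void (*shutdown_lock)(struct demo_rsc *rsc);
    };

    struct demo_rsc {
        const char *id;
        const struct demo_methods *cmds;
    };

    static void
    primitive_shutdown_lock(struct demo_rsc *rsc)
    {
        printf("locking primitive %s\n", rsc->id);
    }

    static const struct demo_methods primitive_methods = {
        .shutdown_lock = primitive_shutdown_lock,
    };

    int
    main(void)
    {
        struct demo_rsc rsc = { "dummy", &primitive_methods };

        rsc.cmds->shutdown_lock(&rsc);  // variant-specific behavior via the table
        return 0;
    }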
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index c880196..4667845 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -18,44 +18,6 @@
/*!
* \internal
- * \brief Check whether a clone or bundle has instances for all available nodes
- *
- * \param[in] collective Clone or bundle to check
- *
- * \return true if \p collective has enough instances for all of its available
- * allowed nodes, otherwise false
- */
-static bool
-can_run_everywhere(const pe_resource_t *collective)
-{
- GHashTableIter iter;
- pe_node_t *node = NULL;
- int available_nodes = 0;
- int max_instances = 0;
-
- switch (collective->variant) {
- case pe_clone:
- max_instances = pe__clone_max(collective);
- break;
- case pe_container:
- max_instances = pe__bundle_max(collective);
- break;
- default:
- return false; // Not actually possible
- }
-
- g_hash_table_iter_init(&iter, collective->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
- if (pcmk__node_available(node, false, false)
- && (max_instances < ++available_nodes)) {
- return false;
- }
- }
- return true;
-}
-
-/*!
- * \internal
* \brief Check whether a node is allowed to run an instance
*
* \param[in] instance Clone instance or bundle container to check
@@ -65,12 +27,12 @@ can_run_everywhere(const pe_resource_t *collective)
* \return true if \p node is allowed to run \p instance, otherwise false
*/
static bool
-can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
+can_run_instance(const pcmk_resource_t *instance, const pcmk_node_t *node,
int max_per_node)
{
- pe_node_t *allowed_node = NULL;
+ pcmk_node_t *allowed_node = NULL;
- if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_removed)) {
pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
instance->id, pe__node_name(node));
return false;
@@ -118,11 +80,11 @@ can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
* \param[in] max_per_node Maximum instances allowed to run on a node
*/
static void
-ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
+ban_unavailable_allowed_nodes(pcmk_resource_t *instance, int max_per_node)
{
if (instance->allowed_nodes != NULL) {
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, instance->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
@@ -132,11 +94,11 @@ ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
node->weight = -INFINITY;
for (GList *child_iter = instance->children;
child_iter != NULL; child_iter = child_iter->next) {
- pe_resource_t *child = (pe_resource_t *) child_iter->data;
- pe_node_t *child_node = NULL;
+ pcmk_resource_t *child = child_iter->data;
+ pcmk_node_t *child_node = NULL;
- child_node = pe_hash_table_lookup(child->allowed_nodes,
- node->details->id);
+ child_node = g_hash_table_lookup(child->allowed_nodes,
+ node->details->id);
if (child_node != NULL) {
pe_rsc_trace(instance,
"Banning %s child %s "
@@ -162,7 +124,7 @@ ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
* g_hash_table_destroy().
*/
static GHashTable *
-new_node_table(pe_node_t *node)
+new_node_table(pcmk_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
@@ -179,38 +141,35 @@ new_node_table(pe_node_t *node)
* \param[in,out] nodes Node table to apply colocations to
*/
static void
-apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
+apply_parent_colocations(const pcmk_resource_t *rsc, GHashTable **nodes)
{
- GList *iter = NULL;
- pcmk__colocation_t *colocation = NULL;
- pe_resource_t *other = NULL;
- float factor = 0.0;
+ GList *colocations = pcmk__this_with_colocations(rsc);
- /* Because the this_with_colocations() and with_this_colocations() methods
- * boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
- * we can use those here directly for efficiency.
- */
- for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
- colocation = (pcmk__colocation_t *) iter->data;
- other = colocation->primary;
- factor = colocation->score / (float) INFINITY,
- other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
- colocation->node_attribute,
- factor,
+ for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
+ const pcmk__colocation_t *colocation = iter->data;
+ pcmk_resource_t *other = colocation->primary;
+ float factor = colocation->score / (float) INFINITY;
+
+ other->cmds->add_colocated_node_scores(other, rsc, rsc->id, nodes,
+ colocation, factor,
pcmk__coloc_select_default);
}
- for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
- colocation = (pcmk__colocation_t *) iter->data;
+ g_list_free(colocations);
+ colocations = pcmk__with_this_colocations(rsc);
+
+ for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
+ const pcmk__colocation_t *colocation = iter->data;
+ pcmk_resource_t *other = colocation->dependent;
+ float factor = colocation->score / (float) INFINITY;
+
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
- other = colocation->dependent;
- factor = colocation->score / (float) INFINITY,
- other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
- colocation->node_attribute,
- factor,
+ other->cmds->add_colocated_node_scores(other, rsc, rsc->id, nodes,
+ colocation, factor,
pcmk__coloc_select_nonnegative);
}
+ g_list_free(colocations);
}
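
apply_parent_colocations() turns each colocation score into a factor (score divided by INFINITY) and lets the other resource merge its node scores into the instance's table, weighted by that factor. A standalone GLib sketch of merging weighted scores into a node table; the names and the INFINITY stand-in are illustrative only:

    /* Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    #define DEMO_INFINITY 1000000

    /* Add factor * score for each node in "from" to the matching entry in "to" */
    static void
    merge_weighted_scores(GHashTable *to, GHashTable *from, float factor)
    {
        GHashTableIter iter;
        gpointer name = NULL;
        gpointer score = NULL;

        g_hash_table_iter_init(&iter, from);
        while (g_hash_table_iter_next(&iter, &name, &score)) {
            int old = GPOINTER_TO_INT(g_hash_table_lookup(to, name));
            int delta = (int) (factor * GPOINTER_TO_INT(score));

            g_hash_table_insert(to, name, GINT_TO_POINTER(old + delta));
        }
    }

    int
    main(void)
    {
        GHashTable *mine = g_hash_table_new(g_str_hash, g_str_equal);
        GHashTable *other = g_hash_table_new(g_str_hash, g_str_equal);
        float factor = 5000.0 / (float) DEMO_INFINITY;  // colocation score / INFINITY

        g_hash_table_insert(mine, "node1", GINT_TO_POINTER(10));
        g_hash_table_insert(other, "node1", GINT_TO_POINTER(200));

        merge_weighted_scores(mine, other, factor);
        printf("node1: %d\n", GPOINTER_TO_INT(g_hash_table_lookup(mine, "node1")));

        g_hash_table_destroy(mine);
        g_hash_table_destroy(other);
        return 0;
    }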
/*!
@@ -229,14 +188,14 @@ apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
* or 0 if assignment order doesn't matter
*/
static int
-cmp_instance_by_colocation(const pe_resource_t *instance1,
- const pe_resource_t *instance2)
+cmp_instance_by_colocation(const pcmk_resource_t *instance1,
+ const pcmk_resource_t *instance2)
{
int rc = 0;
- pe_node_t *node1 = NULL;
- pe_node_t *node2 = NULL;
- pe_node_t *current_node1 = pe__current_node(instance1);
- pe_node_t *current_node2 = pe__current_node(instance2);
+ pcmk_node_t *node1 = NULL;
+ pcmk_node_t *node2 = NULL;
+ pcmk_node_t *current_node1 = pe__current_node(instance1);
+ pcmk_node_t *current_node2 = pe__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
@@ -284,13 +243,13 @@ cmp_instance_by_colocation(const pe_resource_t *instance1,
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
-did_fail(const pe_resource_t *rsc)
+did_fail(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- if (did_fail((const pe_resource_t *) iter->data)) {
+ if (did_fail((const pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -307,11 +266,12 @@ did_fail(const pe_resource_t *rsc)
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
-node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
+node_is_allowed(const pcmk_resource_t *rsc, pcmk_node_t **node)
{
if (*node != NULL) {
- pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
- (*node)->details->id);
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ (*node)->details->id);
+
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pe__node_name(*node));
@@ -336,8 +296,8 @@ node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *instance1 = (const pe_resource_t *) a;
- const pe_resource_t *instance2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
@@ -386,16 +346,16 @@ gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
- pe_node_t *node1 = NULL;
- pe_node_t *node2 = NULL;
+ pcmk_node_t *node1 = NULL;
+ pcmk_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
- const pe_resource_t *instance1 = (const pe_resource_t *) a;
- const pe_resource_t *instance2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
@@ -547,7 +507,41 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b)
/*!
* \internal
- * \brief Choose a node for an instance
+ * \brief Increment the parent's instance count after assigning an instance
+ *
+ * An instance's parent tracks how many instances have been assigned to each
+ * node via its pcmk_node_t:count member. After assigning an instance to a node,
+ * find the corresponding node in the parent's allowed table and increment it.
+ *
+ * \param[in,out] instance Instance whose parent to update
+ * \param[in] assigned_to Node to which the instance was assigned
+ */
+static void
+increment_parent_count(pcmk_resource_t *instance,
+ const pcmk_node_t *assigned_to)
+{
+ pcmk_node_t *allowed = NULL;
+
+ if (assigned_to == NULL) {
+ return;
+ }
+ allowed = pcmk__top_allowed_node(instance, assigned_to);
+
+ if (allowed == NULL) {
+ /* The instance is allowed on the node, but its parent isn't. This
+ * shouldn't be possible if the resource is managed, and we won't be
+ * able to limit the number of instances assigned to the node.
+ */
+ CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pcmk_rsc_managed));
+
+ } else {
+ allowed->count++;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Assign an instance to a node
*
* \param[in,out] instance Clone instance or bundle replica container
* \param[in] prefer If not NULL, attempt early assignment to this
@@ -555,84 +549,153 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b)
* perform final assignment
* \param[in] max_per_node Assign at most this many instances to one node
*
- * \return true if \p instance could be assigned to a node, otherwise false
+ * \return Node to which \p instance is assigned
*/
-static bool
-assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
+static const pcmk_node_t *
+assign_instance(pcmk_resource_t *instance, const pcmk_node_t *prefer,
int max_per_node)
{
- pe_node_t *chosen = NULL;
- pe_node_t *allowed = NULL;
+ pcmk_node_t *chosen = NULL;
- CRM_ASSERT(instance != NULL);
pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
((prefer == NULL)? "no node" : prefer->details->uname));
- if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
- // Instance is already assigned
- return instance->fns->location(instance, NULL, FALSE) != NULL;
- }
-
- if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(instance,
"Assignment loop detected involving %s colocations",
instance->id);
+ return NULL;
+ }
+ ban_unavailable_allowed_nodes(instance, max_per_node);
+
+ // Failed early assignments are reversible (stop_if_fail=false)
+ chosen = instance->cmds->assign(instance, prefer, (prefer == NULL));
+ increment_parent_count(instance, chosen);
+ return chosen;
+}
+
+/*!
+ * \internal
+ * \brief Try to assign an instance to its current node early
+ *
+ * \param[in] rsc Clone or bundle being assigned (for logs only)
+ * \param[in] instance Clone instance or bundle replica container
+ * \param[in] current Instance's current node
+ * \param[in] max_per_node Maximum number of instances per node
+ * \param[in] available Number of instances still available for assignment
+ *
+ * \return \c true if \p instance was successfully assigned to its current node,
+ * or \c false otherwise
+ */
+static bool
+assign_instance_early(const pcmk_resource_t *rsc, pcmk_resource_t *instance,
+ const pcmk_node_t *current, int max_per_node,
+ int available)
+{
+ const pcmk_node_t *chosen = NULL;
+ int reserved = 0;
+
+ pcmk_resource_t *parent = instance->parent;
+ GHashTable *allowed_orig = NULL;
+ GHashTable *allowed_orig_parent = parent->allowed_nodes;
+ const pcmk_node_t *allowed_node = NULL;
+
+ pe_rsc_trace(instance, "Trying to assign %s to its current node %s",
+ instance->id, pe__node_name(current));
+
+ allowed_node = g_hash_table_lookup(instance->allowed_nodes,
+ current->details->id);
+ if (!pcmk__node_available(allowed_node, true, false)) {
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: unavailable",
+ instance->id, pe__node_name(current));
return false;
}
- if (prefer != NULL) { // Possible early assignment to preferred node
+ /* On each iteration, if instance gets assigned to a node other than its
+ * current one, we reserve one instance for the chosen node, unassign
+ * instance, restore instance's original node tables, and try again. This
+ * way, instances are proportionally assigned to nodes based on preferences,
+ * but shuffling of specific instances is minimized. If a node will be
+ * assigned instances at all, it preferentially receives instances that are
+ * currently active there.
+ *
+ * parent->allowed_nodes tracks the number of instances assigned to each
+ * node. If a node already has max_per_node instances assigned,
+ * ban_unavailable_allowed_nodes() marks it as unavailable.
+ *
+ * In the end, we restore the original parent->allowed_nodes to undo the
+ * changes to counts during tentative assignments. If we successfully
+ * assigned instance to its current node, we increment that node's counter.
+ */
+
+ // Back up the allowed node tables of instance and its children recursively
+ pcmk__copy_node_tables(instance, &allowed_orig);
- // Get preferred node with instance's scores
- allowed = g_hash_table_lookup(instance->allowed_nodes,
- prefer->details->id);
+ // Update instances-per-node counts in a scratch table
+ parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes);
- if ((allowed == NULL) || (allowed->weight < 0)) {
- pe_rsc_trace(instance,
- "Not assigning %s to preferred node %s: unavailable",
- instance->id, pe__node_name(prefer));
- return false;
+ while (reserved < available) {
+ chosen = assign_instance(instance, current, max_per_node);
+
+ if (pe__same_node(chosen, current)) {
+ // Successfully assigned to current node
+ break;
}
- }
- ban_unavailable_allowed_nodes(instance, max_per_node);
+ // Assignment updates scores, so restore to original state
+ pe_rsc_debug(instance, "Rolling back node scores for %s", instance->id);
+ pcmk__restore_node_tables(instance, allowed_orig);
- if (prefer == NULL) { // Final assignment
- chosen = instance->cmds->assign(instance, NULL);
+ if (chosen == NULL) {
+ // Assignment failed, so give up
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: unavailable",
+ instance->id, pe__node_name(current));
+ pe__set_resource_flags(instance, pcmk_rsc_unassigned);
+ break;
+ }
- } else { // Possible early assignment to preferred node
- GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
+ // We prefer more strongly to assign an instance to the chosen node
+ pe_rsc_debug(instance,
+ "Not assigning %s to current node %s: %s is better",
+ instance->id, pe__node_name(current),
+ pe__node_name(chosen));
- chosen = instance->cmds->assign(instance, prefer);
+ // Reserve one instance for the chosen node and try again
+ if (++reserved >= available) {
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: "
+ "other assignments are more important",
+ instance->id, pe__node_name(current));
- // Revert nodes if preferred node won't be assigned
- if ((chosen != NULL) && (chosen->details != prefer->details)) {
- crm_info("Not assigning %s to preferred node %s: %s is better",
- instance->id, pe__node_name(prefer),
- pe__node_name(chosen));
- g_hash_table_destroy(instance->allowed_nodes);
- instance->allowed_nodes = backup;
- pcmk__unassign_resource(instance);
- chosen = NULL;
- } else if (backup != NULL) {
- g_hash_table_destroy(backup);
+ } else {
+ pe_rsc_debug(instance,
+ "Reserved an instance of %s for %s. Retrying "
+ "assignment of %s to %s",
+ rsc->id, pe__node_name(chosen), instance->id,
+ pe__node_name(current));
}
+
+ // Clear this assignment (frees chosen); leave instance counts in parent
+ pcmk__unassign_resource(instance);
+ chosen = NULL;
}
- // The parent tracks how many instances have been assigned to each node
- if (chosen != NULL) {
- allowed = pcmk__top_allowed_node(instance, chosen);
- if (allowed == NULL) {
- /* The instance is allowed on the node, but its parent isn't. This
- * shouldn't be possible if the resource is managed, and we won't be
- * able to limit the number of instances assigned to the node.
- */
- CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
+ g_hash_table_destroy(allowed_orig);
- } else {
- allowed->count++;
- }
+ // Restore original instances-per-node counts
+ g_hash_table_destroy(parent->allowed_nodes);
+ parent->allowed_nodes = allowed_orig_parent;
+
+ if (chosen == NULL) {
+ // Couldn't assign instance to current node
+ return false;
}
- return chosen != NULL;
+ pe_rsc_trace(instance, "Assigned %s to current node %s",
+ instance->id, pe__node_name(current));
+ increment_parent_count(instance, chosen);
+ return true;
}
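
The comment block above describes the heart of assign_instance_early(): repeatedly try to keep the instance on its current node; if a different node wins, reserve one instance for that node, roll back the tentative assignment, and try again, so shuffling of currently active instances is minimized. A much-simplified standalone simulation of that reserve-and-retry loop (all names and the scoring rule are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define N_NODES 2

    /* Pick the highest-scoring node that still has capacity, or -1 if none */
    static int
    choose_node(const int scores[N_NODES], const int reserved[N_NODES],
                int max_per_node)
    {
        int best = -1;

        for (int i = 0; i < N_NODES; i++) {
            if ((reserved[i] < max_per_node)
                && ((best == -1) || (scores[i] > scores[best]))) {
                best = i;
            }
        }
        return best;
    }

    int
    main(void)
    {
        int scores[N_NODES] = { 10, 30 };   // current node 0 scores lower than node 1
        int reserved[N_NODES] = { 0, 0 };
        int current = 0;
        int available = 3;                  // instances still unassigned overall
        int max_per_node = 2;
        bool kept = false;

        for (int tries = 0; tries < available; tries++) {
            int chosen = choose_node(scores, reserved, max_per_node);

            if (chosen == current) {
                kept = true;                // current node finally wins
                break;
            }
            if (chosen < 0) {
                break;                      // nowhere left to assign
            }
            reserved[chosen]++;             // reserve one for the better node, retry
        }
        printf("kept on current node: %s\n", kept? "yes" : "no");
        return 0;
    }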
/*!
@@ -644,10 +707,10 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
* \return Number of nodes that are available to run resources
*/
static unsigned int
-reset_allowed_node_counts(pe_resource_t *rsc)
+reset_allowed_node_counts(pcmk_resource_t *rsc)
{
unsigned int available_nodes = 0;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
@@ -664,30 +727,28 @@ reset_allowed_node_counts(pe_resource_t *rsc)
* \internal
* \brief Check whether an instance has a preferred node
*
- * \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] optimal_per_node Optimal number of instances per node
*
* \return Instance's current node if still available, otherwise NULL
*/
-static const pe_node_t *
-preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
- int optimal_per_node)
+static const pcmk_node_t *
+preferred_node(const pcmk_resource_t *instance, int optimal_per_node)
{
- const pe_node_t *node = NULL;
- const pe_node_t *parent_node = NULL;
+ const pcmk_node_t *node = NULL;
+ const pcmk_node_t *parent_node = NULL;
// Check whether instance is active, healthy, and not yet assigned
if ((instance->running_on == NULL)
- || !pcmk_is_set(instance->flags, pe_rsc_provisional)
- || pcmk_is_set(instance->flags, pe_rsc_failed)) {
+ || !pcmk_is_set(instance->flags, pcmk_rsc_unassigned)
+ || pcmk_is_set(instance->flags, pcmk_rsc_failed)) {
return NULL;
}
// Check whether instance's current node can run resources
node = pe__current_node(instance);
if (!pcmk__node_available(node, true, false)) {
- pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
+ pe_rsc_trace(instance, "Not assigning %s to %s early (unavailable)",
instance->id, pe__node_name(node));
return NULL;
}
@@ -695,7 +756,7 @@ preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
// Check whether node already has optimal number of instances assigned
parent_node = pcmk__top_allowed_node(instance, node);
if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
- pe_rsc_trace(rsc,
+ pe_rsc_trace(instance,
"Not assigning %s to %s early "
"(optimal instances already assigned)",
instance->id, pe__node_name(node));
@@ -715,7 +776,7 @@ preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
* \param[in] max_per_node Maximum instances to assign to any one node
*/
void
-pcmk__assign_instances(pe_resource_t *collective, GList *instances,
+pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node)
{
// Reuse node count to track number of assigned instances
@@ -724,8 +785,8 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
int optimal_per_node = 0;
int assigned = 0;
GList *iter = NULL;
- pe_resource_t *instance = NULL;
- const pe_node_t *current = NULL;
+ pcmk_resource_t *instance = NULL;
+ const pcmk_node_t *current = NULL;
if (available_nodes > 0) {
optimal_per_node = max_total / available_nodes;
@@ -744,13 +805,17 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
// Assign as many instances as possible to their current location
for (iter = instances; (iter != NULL) && (assigned < max_total);
iter = iter->next) {
- instance = (pe_resource_t *) iter->data;
+ int available = max_total - assigned;
+
+ instance = iter->data;
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
+ continue; // Already assigned
+ }
- current = preferred_node(collective, instance, optimal_per_node);
+ current = preferred_node(instance, optimal_per_node);
if ((current != NULL)
- && assign_instance(instance, current, max_per_node)) {
- pe_rsc_trace(collective, "Assigned %s to current node %s",
- instance->id, pe__node_name(current));
+ && assign_instance_early(collective, instance, current,
+ max_per_node, available)) {
assigned++;
}
}
@@ -759,9 +824,9 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
assigned, max_total, pcmk__plural_s(max_total));
for (iter = instances; iter != NULL; iter = iter->next) {
- instance = (pe_resource_t *) iter->data;
+ instance = (pcmk_resource_t *) iter->data;
- if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
continue; // Already assigned
}
@@ -770,7 +835,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
if (pcmk__top_allowed_node(instance, current) == NULL) {
const char *unmanaged = "";
- if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_managed)) {
unmanaged = "Unmanaged resource ";
}
crm_notice("%s%s is running on %s which is no longer allowed",
@@ -786,7 +851,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
resource_location(instance, NULL, -INFINITY,
"collective_limit_reached", collective->cluster);
- } else if (assign_instance(instance, NULL, max_per_node)) {
+ } else if (assign_instance(instance, NULL, max_per_node) != NULL) {
assigned++;
}
}
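
pcmk__assign_instances() works in two passes: first it tries to keep instances on their current nodes (up to the optimal count per node), then it assigns whatever is still unassigned until max_total is reached. A compact standalone sketch of that two-pass shape; the data model and limits here are invented:

    #include <stdio.h>

    #define N_INSTANCES 4

    struct demo_instance {
        const char *id;
        int current;        // index of current node, or -1 if inactive
        int assigned;       // index of assigned node, or -1 if unassigned
    };

    int
    main(void)
    {
        struct demo_instance instances[N_INSTANCES] = {
            { "i0", 0, -1 }, { "i1", 1, -1 }, { "i2", -1, -1 }, { "i3", 1, -1 },
        };
        int per_node[2] = { 0, 0 };
        int optimal_per_node = 1;
        int max_per_node = 2;
        int max_total = 3;
        int assigned = 0;

        // Pass 1: keep active instances where they are, up to the optimal count
        for (int i = 0; (i < N_INSTANCES) && (assigned < max_total); i++) {
            int node = instances[i].current;

            if ((node >= 0) && (per_node[node] < optimal_per_node)) {
                instances[i].assigned = node;
                per_node[node]++;
                assigned++;
            }
        }

        // Pass 2: place remaining instances on whichever node still has room
        for (int i = 0; (i < N_INSTANCES) && (assigned < max_total); i++) {
            if (instances[i].assigned >= 0) {
                continue;   // already assigned in pass 1
            }
            for (int node = 0; node < 2; node++) {
                if (per_node[node] < max_per_node) {
                    instances[i].assigned = node;
                    per_node[node]++;
                    assigned++;
                    break;
                }
            }
        }

        for (int i = 0; i < N_INSTANCES; i++) {
            printf("%s -> %d\n", instances[i].id, instances[i].assigned);
        }
        return 0;
    }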
@@ -821,7 +886,7 @@ enum instance_state {
* \param[in,out] state Whether any instance is starting, stopping, etc.
*/
static void
-check_instance_state(const pe_resource_t *instance, uint32_t *state)
+check_instance_state(const pcmk_resource_t *instance, uint32_t *state)
{
const GList *iter = NULL;
uint32_t instance_state = 0; // State of just this instance
@@ -832,11 +897,11 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
}
// If instance is a collective (a cloned group), check its children instead
- if (instance->variant > pe_native) {
+ if (instance->variant > pcmk_rsc_variant_primitive) {
for (iter = instance->children;
(iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
iter = iter->next) {
- check_instance_state((const pe_resource_t *) iter->data, state);
+ check_instance_state((const pcmk_resource_t *) iter->data, state);
}
return;
}
@@ -854,11 +919,13 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
|instance_stopping);
iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
- const bool optional = pcmk_is_set(action->flags, pe_action_optional);
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
+ const bool optional = pcmk_is_set(action->flags, pcmk_action_optional);
+
+ if (pcmk__str_eq(PCMK_ACTION_START, action->task, pcmk__str_none)) {
+ if (!optional
+ && pcmk_is_set(action->flags, pcmk_action_runnable)) {
- if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
- if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is starting due to %s",
action->uuid);
instance_state |= instance_starting;
@@ -868,14 +935,15 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
(optional? "optional" : "unrunnable"));
}
- } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_STOP, action->task,
+ pcmk__str_none)) {
/* Only stop actions can be pseudo-actions for primitives. That
* indicates that the node they are on is being fenced, so the stop
* is implied rather than actually executed.
*/
if (!optional
- && pcmk_any_flags_set(action->flags,
- pe_action_pseudo|pe_action_runnable)) {
+ && pcmk_any_flags_set(action->flags, pcmk_action_pseudo
+ |pcmk_action_runnable)) {
pe_rsc_trace(instance, "Instance is stopping due to %s",
action->uuid);
instance_state |= instance_stopping;
@@ -902,52 +970,52 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
* \param[in,out] instances List of clone instances or bundle containers
*/
void
-pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
+pcmk__create_instance_actions(pcmk_resource_t *collective, GList *instances)
{
uint32_t state = 0;
- pe_action_t *stop = NULL;
- pe_action_t *stopped = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *stopped = NULL;
- pe_action_t *start = NULL;
- pe_action_t *started = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *started = NULL;
pe_rsc_trace(collective, "Creating collective instance actions for %s",
collective->id);
// Create actions for each instance appropriate to its variant
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_instance_state(instance, &state);
}
// Create pseudo-actions for rsc start and started
- start = pe__new_rsc_pseudo_action(collective, RSC_START,
+ start = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_START,
!pcmk_is_set(state, instance_starting),
true);
- started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
+ started = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_RUNNING,
!pcmk_is_set(state, instance_starting),
false);
started->priority = INFINITY;
if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
- pe__set_action_flags(started, pe_action_runnable);
+ pe__set_action_flags(started, pcmk_action_runnable);
}
// Create pseudo-actions for rsc stop and stopped
- stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
+ stop = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOP,
!pcmk_is_set(state, instance_stopping),
true);
- stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
+ stopped = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOPPED,
!pcmk_is_set(state, instance_stopping),
true);
stopped->priority = INFINITY;
if (!pcmk_is_set(state, instance_restarting)) {
- pe__set_action_flags(stop, pe_action_migrate_runnable);
+ pe__set_action_flags(stop, pcmk_action_migratable);
}
- if (collective->variant == pe_clone) {
+ if (collective->variant == pcmk_rsc_variant_clone) {
pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
stopped);
}
@@ -965,9 +1033,9 @@ pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
* is no longer needed.
*/
static inline GList *
-get_instance_list(const pe_resource_t *rsc)
+get_instance_list(const pcmk_resource_t *rsc)
{
- if (rsc->variant == pe_container) {
+ if (rsc->variant == pcmk_rsc_variant_bundle) {
return pe__bundle_containers(rsc);
} else {
return rsc->children;
@@ -982,7 +1050,7 @@ get_instance_list(const pe_resource_t *rsc)
* \param[in,out] list Return value of get_instance_list() for \p rsc
*/
static inline void
-free_instance_list(const pe_resource_t *rsc, GList *list)
+free_instance_list(const pcmk_resource_t *rsc, GList *list)
{
if (list != rsc->children) {
g_list_free(list);
@@ -995,7 +1063,7 @@ free_instance_list(const pe_resource_t *rsc, GList *list)
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] node Instance must match this node
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
@@ -1003,14 +1071,14 @@ free_instance_list(const pe_resource_t *rsc, GList *list)
* otherwise false
*/
bool
-pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
+pcmk__instance_matches(const pcmk_resource_t *instance, const pcmk_node_t *node,
enum rsc_role_e role, bool current)
{
- pe_node_t *instance_node = NULL;
+ pcmk_node_t *instance_node = NULL;
CRM_CHECK((instance != NULL) && (node != NULL), return false);
- if ((role != RSC_ROLE_UNKNOWN)
+ if ((role != pcmk_role_unknown)
&& (role != instance->fns->state(instance, current))) {
pe_rsc_trace(instance,
"%s is not a compatible instance (role is not %s)",
@@ -1018,7 +1086,7 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
return false;
}
- if (!is_set_recursive(instance, pe_rsc_block, true)) {
+ if (!is_set_recursive(instance, pcmk_rsc_blocked, true)) {
// We only want instances that haven't failed
instance_node = instance->fns->location(instance, NULL, current);
}
@@ -1030,7 +1098,7 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
return false;
}
- if (instance_node->details != node->details) {
+ if (!pe__same_node(instance_node, node)) {
pe_rsc_trace(instance,
"%s is not a compatible instance (assigned to %s not %s)",
instance->id, pe__node_name(instance_node),
@@ -1048,27 +1116,28 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
* \param[in] match_rsc Resource that instance must match (for logging only)
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] node Instance must match this node
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return \p rsc instance matching \p node and \p role if any, otherwise NULL
*/
-static pe_resource_t *
-find_compatible_instance_on_node(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc,
- const pe_node_t *node, enum rsc_role_e role,
+static pcmk_resource_t *
+find_compatible_instance_on_node(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, enum rsc_role_e role,
bool current)
{
GList *instances = NULL;
instances = get_instance_list(rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
if (pcmk__instance_matches(instance, node, role, current)) {
- pe_rsc_trace(match_rsc, "Found %s %s instance %s compatible with %s on %s",
- role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
+ pe_rsc_trace(match_rsc,
+ "Found %s %s instance %s compatible with %s on %s",
+ role == pcmk_role_unknown? "matching" : role2text(role),
rsc->id, instance->id, match_rsc->id,
pe__node_name(node));
free_instance_list(rsc, instances); // Only frees list, not contents
@@ -1078,7 +1147,7 @@ find_compatible_instance_on_node(const pe_resource_t *match_rsc,
free_instance_list(rsc, instances);
pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
- ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
+ ((role == pcmk_role_unknown)? "matching" : role2text(role)),
rsc->id, match_rsc->id, pe__node_name(node));
return NULL;
}
@@ -1089,23 +1158,24 @@ find_compatible_instance_on_node(const pe_resource_t *match_rsc,
*
* \param[in] match_rsc Resource that instance must match
* \param[in] rsc Clone or bundle resource to check for matching instance
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
* if any, otherwise NULL
*/
-pe_resource_t *
-pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc, enum rsc_role_e role,
+pcmk_resource_t *
+pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc, enum rsc_role_e role,
bool current)
{
- pe_resource_t *instance = NULL;
+ pcmk_resource_t *instance = NULL;
GList *nodes = NULL;
- const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
+ const pcmk_node_t *node = NULL;
// If match_rsc has a node, check only that node
+ node = match_rsc->fns->location(match_rsc, NULL, current);
if (node != NULL) {
return find_compatible_instance_on_node(match_rsc, rsc, node, role,
current);
@@ -1117,7 +1187,7 @@ pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
iter = iter->next) {
instance = find_compatible_instance_on_node(match_rsc, rsc,
- (pe_node_t *) iter->data,
+ (pcmk_node_t *) iter->data,
role, current);
}
@@ -1136,14 +1206,15 @@ pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
* \param[in] first 'First' action in an ordering
* \param[in] then 'Then' action in an ordering
* \param[in,out] then_instance 'Then' instance that has no interleave match
- * \param[in] type Group of enum pe_ordering flags to apply
+ * \param[in] type Group of enum pcmk__action_relation_flags
* \param[in] current If true, "then" action is stopped or demoted
*
* \return true if \p then_instance was unassigned, otherwise false
*/
static bool
-unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
- pe_resource_t *then_instance, uint32_t type, bool current)
+unassign_if_mandatory(const pcmk_action_t *first, const pcmk_action_t *then,
+ pcmk_resource_t *then_instance, uint32_t type,
+ bool current)
{
// Allow "then" instance to go down even without an interleave match
if (current) {
@@ -1155,13 +1226,13 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
/* If the "first" action must be runnable, but there is no "first"
* instance, the "then" instance must not be allowed to come up.
*/
- } else if (pcmk_any_flags_set(type, pe_order_runnable_left
- |pe_order_implies_then)) {
+ } else if (pcmk_any_flags_set(type, pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then)) {
pe_rsc_info(then->rsc,
"Inhibiting %s from being active "
"because there is no %s instance to interleave",
then_instance->id, first->rsc->id);
- return pcmk__assign_resource(then_instance, NULL, true);
+ return pcmk__assign_resource(then_instance, NULL, true, true);
}
return false;
}
@@ -1181,13 +1252,13 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
* bundle container, its containerized resource) that matches
* \p action_name and \p node if any, otherwise NULL
*/
-static pe_action_t *
-find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
- const char *action_name, const pe_node_t *node,
+static pcmk_action_t *
+find_instance_action(const pcmk_action_t *action, const pcmk_resource_t *instance,
+ const char *action_name, const pcmk_node_t *node,
bool for_first)
{
- const pe_resource_t *rsc = NULL;
- pe_action_t *matching_action = NULL;
+ const pcmk_resource_t *rsc = NULL;
+ pcmk_action_t *matching_action = NULL;
/* If instance is a bundle container, sometimes we should interleave the
* action for the container itself, and sometimes for the containerized
@@ -1204,15 +1275,15 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
* everything except promote and demote (which can only be performed on the
* containerized resource).
*/
- if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
- CRMD_ACTION_STOPPED, NULL))
+ if ((for_first && !pcmk__str_any_of(action->task, PCMK_ACTION_STOP,
+ PCMK_ACTION_STOPPED, NULL))
- || (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
- CRMD_ACTION_PROMOTED,
- CRMD_ACTION_DEMOTE,
- CRMD_ACTION_DEMOTED, NULL))) {
+ || (!for_first && pcmk__str_any_of(action->task, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_PROMOTED,
+ PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_DEMOTED, NULL))) {
- rsc = pcmk__get_rsc_in_container(instance);
+ rsc = pe__get_rsc_in_container(instance);
}
if (rsc == NULL) {
rsc = instance; // No containerized resource, use instance itself
@@ -1225,11 +1296,12 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
return matching_action;
}
- if (pcmk_is_set(instance->flags, pe_rsc_orphan)
- || pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_removed)
+ || pcmk__str_any_of(action_name, PCMK_ACTION_STOP, PCMK_ACTION_DEMOTE,
+ NULL)) {
crm_trace("No %s action found for %s%s",
action_name,
- pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
+ pcmk_is_set(instance->flags, pcmk_rsc_removed)? "orphan " : "",
instance->id);
} else {
crm_err("No %s action found for %s to interleave (bug?)",
@@ -1252,20 +1324,23 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
* \return Original action name for \p action
*/
static const char *
-orig_action_name(const pe_action_t *action)
+orig_action_name(const pcmk_action_t *action)
{
- const pe_resource_t *instance = action->rsc->children->data; // Any instance
+ // Any instance will do
+ const pcmk_resource_t *instance = action->rsc->children->data;
+
char *action_type = NULL;
const char *action_name = action->task;
- enum action_tasks orig_task = no_action;
+ enum action_tasks orig_task = pcmk_action_unspecified;
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
- CRMD_ACTION_NOTIFIED, NULL)) {
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_NOTIFIED, NULL)) {
// action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
- return task2text(no_action));
+ return task2text(pcmk_action_unspecified));
action_name = strstr(action_type, "_notify_");
- CRM_CHECK(action_name != NULL, return task2text(no_action));
+ CRM_CHECK(action_name != NULL,
+ return task2text(pcmk_action_unspecified));
action_name += strlen("_notify_");
}
orig_task = get_complex_task(instance, action_name);
@@ -1286,16 +1361,16 @@ orig_action_name(const pe_action_t *action)
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
-update_interleaved_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t filter,
+update_interleaved_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t filter,
uint32_t type)
{
GList *instances = NULL;
@@ -1303,23 +1378,23 @@ update_interleaved_actions(pe_action_t *first, pe_action_t *then,
const char *orig_first_task = orig_action_name(first);
// Stops and demotes must be interleaved with instance on current node
- bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
+ bool current = pcmk__ends_with(first->uuid, "_" PCMK_ACTION_STOPPED "_0")
|| pcmk__ends_with(first->uuid,
- "_" CRMD_ACTION_DEMOTED "_0");
+ "_" PCMK_ACTION_DEMOTED "_0");
// Update the specified actions for each "then" instance individually
instances = get_instance_list(then->rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *first_instance = NULL;
- pe_resource_t *then_instance = iter->data;
+ pcmk_resource_t *first_instance = NULL;
+ pcmk_resource_t *then_instance = iter->data;
- pe_action_t *first_action = NULL;
- pe_action_t *then_action = NULL;
+ pcmk_action_t *first_action = NULL;
+ pcmk_action_t *then_action = NULL;
// Find a "first" instance to interleave with this "then" instance
first_instance = pcmk__find_compatible_instance(then_instance,
first->rsc,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
current);
if (first_instance == NULL) { // No instance can be interleaved
@@ -1366,10 +1441,10 @@ update_interleaved_actions(pe_action_t *first, pe_action_t *then,
* \return true if \p first and \p then can be interleaved, otherwise false
*/
static bool
-can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
+can_interleave_actions(const pcmk_action_t *first, const pcmk_action_t *then)
{
bool interleave = false;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if ((first->rsc == NULL) || (then->rsc == NULL)) {
crm_trace("Not interleaving %s with %s: not resource actions",
@@ -1383,7 +1458,8 @@ can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
return false;
}
- if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
+ if ((first->rsc->variant < pcmk_rsc_variant_clone)
+ || (then->rsc->variant < pcmk_rsc_variant_clone)) {
crm_trace("Not interleaving %s with %s: not clones or bundles",
first->uuid, then->uuid);
return false;
@@ -1418,19 +1494,19 @@ can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
-update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
- const pe_action_t *then, const pe_node_t *node,
+update_noninterleaved_actions(pcmk_resource_t *instance, pcmk_action_t *first,
+ const pcmk_action_t *then, const pcmk_node_t *node,
uint32_t flags, uint32_t filter, uint32_t type)
{
- pe_action_t *instance_action = NULL;
+ pcmk_action_t *instance_action = NULL;
uint32_t instance_flags = 0;
uint32_t changed = pcmk__updated_none;
@@ -1443,7 +1519,7 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
// Check whether action is runnable
instance_flags = instance->cmds->action_flags(instance_action, node);
- if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
+ if (!pcmk_is_set(instance_flags, pcmk_action_runnable)) {
return changed;
}
@@ -1456,7 +1532,7 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
if (pcmk_is_set(changed, pcmk__updated_then)) {
for (GList *after_iter = instance_action->actions_after;
after_iter != NULL; after_iter = after_iter->next) {
- pe_action_wrapper_t *after = after_iter->data;
+ pcmk__related_action_t *after = after_iter->data;
pcmk__update_action_for_orderings(after->action, instance->cluster);
}
@@ -1474,26 +1550,28 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__instance_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
+ CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
+
if (then->rsc == NULL) {
return pcmk__updated_none;
@@ -1506,11 +1584,11 @@ pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
// Update actions for the clone or bundle resource itself
changed |= pcmk__update_ordered_actions(first, then, node, flags,
- filter, type, data_set);
+ filter, type, scheduler);
// Update the 'then' clone instances or bundle containers individually
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = iter->data;
+ pcmk_resource_t *instance = iter->data;
changed |= update_noninterleaved_actions(instance, first, then,
node, flags, filter, type);
@@ -1536,25 +1614,26 @@ pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
- const pe_node_t *node)
+uint32_t
+pcmk__collective_action_flags(pcmk_action_t *action, const GList *instances,
+ const pcmk_node_t *node)
{
bool any_runnable = false;
- enum pe_action_flags flags;
const char *action_name = orig_action_name(action);
// Set original assumptions (optional and runnable may be cleared below)
- flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
+ uint32_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
for (const GList *iter = instances; iter != NULL; iter = iter->next) {
- const pe_resource_t *instance = iter->data;
- const pe_node_t *instance_node = NULL;
- pe_action_t *instance_action = NULL;
- enum pe_action_flags instance_flags;
+ const pcmk_resource_t *instance = iter->data;
+ const pcmk_node_t *instance_node = NULL;
+ pcmk_action_t *instance_action = NULL;
+ uint32_t instance_flags;
// Node is relevant only to primitive instances
- if (instance->variant == pe_native) {
+ if (instance->variant == pcmk_rsc_variant_primitive) {
instance_node = node;
}
@@ -1573,16 +1652,17 @@ pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
instance_flags = instance->cmds->action_flags(instance_action, node);
// If any instance action is mandatory, so is the collective action
- if (pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(instance_flags, pe_action_optional)) {
+ if (pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(instance_flags, pcmk_action_optional)) {
pe_rsc_trace(instance, "%s is mandatory because %s is",
action->uuid, instance_action->uuid);
- pe__clear_action_summary_flags(flags, action, pe_action_optional);
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_summary_flags(flags, action,
+ pcmk_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
// If any instance action is runnable, so is the collective action
- if (pcmk_is_set(instance_flags, pe_action_runnable)) {
+ if (pcmk_is_set(instance_flags, pcmk_action_runnable)) {
any_runnable = true;
}
}
@@ -1591,69 +1671,11 @@ pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
pe_rsc_trace(action->rsc,
"%s is not runnable because no instance can run %s",
action->uuid, action_name);
- pe__clear_action_summary_flags(flags, action, pe_action_runnable);
+ pe__clear_action_summary_flags(flags, action, pcmk_action_runnable);
if (node == NULL) {
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
}
return flags;
}
-
-/*!
- * \internal
- * \brief Add a collective resource's colocations to a list for an instance
- *
- * \param[in,out] list Colocation list to add to
- * \param[in] instance Clone or bundle instance or instance group member
- * \param[in] collective Clone or bundle resource with colocations to add
- * \param[in] with_this If true, add collective's "with this" colocations,
- * otherwise add its "this with" colocations
- */
-void
-pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this)
-{
- const GList *colocations = NULL;
- bool everywhere = false;
-
- CRM_CHECK((list != NULL) && (instance != NULL), return);
-
- if (collective == NULL) {
- return;
- }
- switch (collective->variant) {
- case pe_clone:
- case pe_container:
- break;
- default:
- return;
- }
-
- everywhere = can_run_everywhere(collective);
-
- if (with_this) {
- colocations = collective->rsc_cons_lhs;
- } else {
- colocations = collective->rsc_cons;
- }
-
- for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
- const pcmk__colocation_t *colocation = iter->data;
-
- if (with_this
- && !pcmk__colocation_has_influence(colocation, instance)) {
- continue;
- }
- if (!everywhere || (colocation->score < 0)
- || (!with_this && (colocation->score == INFINITY))) {
-
- if (with_this) {
- pcmk__add_with_this(list, colocation);
- } else {
- pcmk__add_this_with(list, colocation);
- }
- }
- }
-}
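
As background for the check_instance_state() and pcmk__create_instance_actions() hunks above: each instance's state is folded into a bit mask (instance_starting, instance_stopping, and so on), and iteration stops early once every flag of interest has been seen. The standalone sketch below imitates that pattern; the demo_* names and the is_set()/all_flags_set() macros are local stand-ins for illustration, not Pacemaker's actual pcmk_is_set()/pcmk_all_flags_set() definitions.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for Pacemaker's flag helpers (illustration only) */
    #define is_set(word, flag)        (((word) & (flag)) == (flag))
    #define all_flags_set(word, mask) (((word) & (mask)) == (mask))

    enum demo_instance_state {
        demo_starting = (1 << 0),   /* plays the role of instance_starting */
        demo_stopping = (1 << 1),   /* plays the role of instance_stopping */
    };

    int
    main(void)
    {
        /* Pretend these are the per-instance states gathered by the caller */
        uint32_t per_instance[] = { demo_starting, 0, demo_stopping };
        uint32_t state = 0;

        for (size_t i = 0; i < sizeof(per_instance) / sizeof(per_instance[0]); i++) {
            state |= per_instance[i];   /* accumulate, like *state |= instance_state */

            /* Once both flags are known, further iteration adds nothing */
            if (all_flags_set(state, demo_starting|demo_stopping)) {
                break;
            }
        }

        printf("starting=%s stopping=%s\n",
               is_set(state, demo_starting) ? "yes" : "no",
               is_set(state, demo_stopping) ? "yes" : "no");
        return 0;
    }
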
diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c
index b4ce4ff..eab9481 100644
--- a/lib/pacemaker/pcmk_sched_location.c
+++ b/lib/pacemaker/pcmk_sched_location.c
@@ -14,13 +14,14 @@
#include <crm/crm.h>
#include <crm/pengine/status.h>
+#include <crm/pengine/rules.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
static int
get_node_score(const char *rule, const char *score, bool raw,
- pe_node_t *node, pe_resource_t *rsc)
+ pcmk_node_t *node, pcmk_resource_t *rsc)
{
int score_f = 0;
@@ -31,7 +32,11 @@ get_node_score(const char *rule, const char *score, bool raw,
score_f = char2score(score);
} else {
- const char *attr_score = pe_node_attribute_calculated(node, score, rsc);
+ const char *attr_score = NULL;
+
+ attr_score = pe__node_attribute_calculated(node, score, rsc,
+ pcmk__rsc_node_current,
+ false);
if (attr_score == NULL) {
crm_debug("Rule %s: %s did not have a value for %s",
@@ -48,9 +53,8 @@ get_node_score(const char *rule, const char *score, bool raw,
}
static pe__location_t *
-generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
+generate_location_rule(pcmk_resource_t *rsc, xmlNode *rule_xml,
const char *discovery, crm_time_t *next_change,
- pe_working_set_t *data_set,
pe_re_match_data_t *re_match_data)
{
const char *rule_id = NULL;
@@ -58,8 +62,8 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
const char *boolean = NULL;
const char *role = NULL;
- GList *gIter = NULL;
- GList *match_L = NULL;
+ GList *iter = NULL;
+ GList *nodes = NULL;
bool do_and = true;
bool accept = true;
@@ -68,7 +72,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
pe__location_t *location_rule = NULL;
- rule_xml = expand_idref(rule_xml, data_set->input);
+ rule_xml = expand_idref(rule_xml, rsc->cluster->input);
if (rule_xml == NULL) {
return NULL;
}
@@ -79,7 +83,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
crm_trace("Processing rule: %s", rule_id);
- if ((role != NULL) && (text2role(role) == RSC_ROLE_UNKNOWN)) {
+ if ((role != NULL) && (text2role(role) == pcmk_role_unknown)) {
pe_err("Bad role specified for %s: %s", rule_id, role);
return NULL;
}
@@ -95,8 +99,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
do_and = false;
}
- location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL,
- data_set);
+ location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL);
if (location_rule == NULL) {
return NULL;
@@ -116,36 +119,34 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
if (role != NULL) {
crm_trace("Setting role filter: %s", role);
location_rule->role_filter = text2role(role);
- if (location_rule->role_filter == RSC_ROLE_UNPROMOTED) {
+ if (location_rule->role_filter == pcmk_role_unpromoted) {
/* Any promotable clone cannot be promoted without being in the
* unpromoted role first. Ergo, any constraint for the unpromoted
* role applies to every role.
*/
- location_rule->role_filter = RSC_ROLE_UNKNOWN;
+ location_rule->role_filter = pcmk_role_unknown;
}
}
if (do_and) {
- GList *gIter = NULL;
-
- match_L = pcmk__copy_node_list(data_set->nodes, true);
- for (gIter = match_L; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ nodes = pcmk__copy_node_list(rsc->cluster->nodes, true);
+ for (iter = nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = iter->data;
node->weight = get_node_score(rule_id, score, raw_score, node, rsc);
}
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ for (iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
int score_f = 0;
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = iter->data;
pe_match_data_t match_data = {
.re = re_match_data,
- .params = pe_rsc_params(rsc, node, data_set),
+ .params = pe_rsc_params(rsc, node, rsc->cluster),
.meta = rsc->meta,
};
- accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN,
- data_set->now, next_change, &match_data);
+ accept = pe_test_rule(rule_xml, node->details->attrs, pcmk_role_unknown,
+ rsc->cluster->now, next_change, &match_data);
crm_trace("Rule %s %s on %s", ID(rule_xml), accept? "passed" : "failed",
pe__node_name(node));
@@ -153,14 +154,14 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
score_f = get_node_score(rule_id, score, raw_score, node, rsc);
if (accept) {
- pe_node_t *local = pe_find_node_id(match_L, node->details->id);
+ pcmk_node_t *local = pe_find_node_id(nodes, node->details->id);
if ((local == NULL) && do_and) {
continue;
} else if (local == NULL) {
local = pe__copy_node(node);
- match_L = g_list_append(match_L, local);
+ nodes = g_list_append(nodes, local);
}
if (!do_and) {
@@ -171,10 +172,10 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
} else if (do_and && !accept) {
// Remove it
- pe_node_t *delete = pe_find_node_id(match_L, node->details->id);
+ pcmk_node_t *delete = pe_find_node_id(nodes, node->details->id);
if (delete != NULL) {
- match_L = g_list_remove(match_L, delete);
+ nodes = g_list_remove(nodes, delete);
crm_trace("%s did not match", pe__node_name(node));
}
free(delete);
@@ -185,7 +186,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
free((char *)score);
}
- location_rule->node_list_rh = match_L;
+ location_rule->node_list_rh = nodes;
if (location_rule->node_list_rh == NULL) {
crm_trace("No matching nodes for rule %s", rule_id);
return NULL;
@@ -197,15 +198,15 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
}
static void
-unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
- const char *score, pe_working_set_t *data_set,
- pe_re_match_data_t *re_match_data)
+unpack_rsc_location(xmlNode *xml_obj, pcmk_resource_t *rsc, const char *role,
+ const char *score, pe_re_match_data_t *re_match_data)
{
pe__location_t *location = NULL;
const char *rsc_id = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE);
- const char *discovery = crm_element_value(xml_obj, XML_LOCATION_ATTR_DISCOVERY);
+ const char *discovery = crm_element_value(xml_obj,
+ XML_LOCATION_ATTR_DISCOVERY);
if (rsc == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
@@ -219,13 +220,12 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
if ((node != NULL) && (score != NULL)) {
int score_i = char2score(score);
- pe_node_t *match = pe_find_node(data_set->nodes, node);
+ pcmk_node_t *match = pe_find_node(rsc->cluster->nodes, node);
if (!match) {
return;
}
- location = pcmk__new_location(id, rsc, score_i, discovery, match,
- data_set);
+ location = pcmk__new_location(id, rsc, score_i, discovery, match);
} else {
bool empty = true;
@@ -240,7 +240,7 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
empty = false;
crm_trace("Unpacking %s/%s", id, ID(rule_xml));
generate_location_rule(rsc, rule_xml, discovery, next_change,
- data_set, re_match_data);
+ re_match_data);
}
if (empty) {
@@ -254,7 +254,8 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
if (crm_time_is_defined(next_change)) {
time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(t, data_set);
+ pe__update_recheck_time(t, rsc->cluster,
+ "location rule evaluation");
}
crm_time_free(next_change);
return;
@@ -265,18 +266,18 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
}
if ((location != NULL) && (role != NULL)) {
- if (text2role(role) == RSC_ROLE_UNKNOWN) {
+ if (text2role(role) == pcmk_role_unknown) {
pe_err("Invalid constraint %s: Bad role %s", id, role);
return;
} else {
enum rsc_role_e r = text2role(role);
- switch(r) {
- case RSC_ROLE_UNKNOWN:
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNPROMOTED:
+ switch (r) {
+ case pcmk_role_unknown:
+ case pcmk_role_started:
+ case pcmk_role_unpromoted:
/* Applies to all */
- location->role_filter = RSC_ROLE_UNKNOWN;
+ location->role_filter = pcmk_role_unknown;
break;
default:
location->role_filter = r;
@@ -287,23 +288,22 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
}
static void
-unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
if (value) {
- pe_resource_t *rsc;
+ pcmk_resource_t *rsc;
- rsc = pcmk__find_constraint_resource(data_set->resources, value);
- unpack_rsc_location(xml_obj, rsc, NULL, NULL, data_set, NULL);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, value);
+ unpack_rsc_location(xml_obj, rsc, NULL, NULL, NULL);
}
value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN);
if (value) {
regex_t *r_patt = calloc(1, sizeof(regex_t));
bool invert = false;
- GList *rIter = NULL;
if (value[0] == '!') {
value++;
@@ -318,13 +318,15 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
return;
}
- for (rIter = data_set->resources; rIter; rIter = rIter->next) {
- pe_resource_t *r = rIter->data;
+ for (GList *iter = scheduler->resources; iter != NULL;
+ iter = iter->next) {
+
+ pcmk_resource_t *r = iter->data;
int nregs = 0;
regmatch_t *pmatch = NULL;
int status;
- if(r_patt->re_nsub > 0) {
+ if (r_patt->re_nsub > 0) {
nregs = r_patt->re_nsub + 1;
} else {
nregs = 1;
@@ -341,13 +343,12 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
};
crm_debug("'%s' matched '%s' for %s", r->id, value, id);
- unpack_rsc_location(xml_obj, r, NULL, NULL, data_set,
- &re_match_data);
+ unpack_rsc_location(xml_obj, r, NULL, NULL, &re_match_data);
} else if (invert && (status != 0)) {
crm_debug("'%s' is an inverted match of '%s' for %s",
r->id, value, id);
- unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, NULL);
+ unpack_rsc_location(xml_obj, r, NULL, NULL, NULL);
} else {
crm_trace("'%s' does not match '%s' for %s", r->id, value, id);
@@ -364,13 +365,13 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
// \return Standard Pacemaker return code
static int
unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *rsc_id = NULL;
const char *state = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
xmlNode *rsc_set = NULL;
*expanded_xml = NULL;
@@ -380,12 +381,12 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_location");
return pcmk_rc_ok;
@@ -396,7 +397,7 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, rsc_id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, rsc_id);
return pcmk_rc_unpack_error;
@@ -410,9 +411,9 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under constraint
+ // Convert any template or tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_LOC_ATTR_SOURCE,
- false, data_set)) {
+ false, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -437,10 +438,11 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
// \return Standard Pacemaker return code
static int
-unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
+unpack_location_set(xmlNode *location, xmlNode *set,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *resource = NULL;
const char *set_id;
const char *role;
const char *local_score;
@@ -461,7 +463,7 @@ unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- resource = pcmk__find_constraint_resource(data_set->resources,
+ resource = pcmk__find_constraint_resource(scheduler->resources,
ID(xml_rsc));
if (resource == NULL) {
pcmk__config_err("%s: No resource found for %s",
@@ -469,15 +471,14 @@ unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
return pcmk_rc_unpack_error;
}
- unpack_rsc_location(location, resource, role, local_score, data_set,
- NULL);
+ unpack_rsc_location(location, resource, role, local_score, NULL);
}
return pcmk_rc_ok;
}
void
-pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
bool any_sets = false;
@@ -485,7 +486,7 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
- if (unpack_location_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
+ if (unpack_location_tags(xml_obj, &expanded_xml, scheduler) != pcmk_rc_ok) {
return;
}
@@ -498,9 +499,9 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
set = crm_next_same_xml(set)) {
any_sets = true;
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if ((set == NULL) // Configuration error, message already logged
- || (unpack_location_set(xml_obj, set, data_set) != pcmk_rc_ok)) {
+ || (unpack_location_set(xml_obj, set, scheduler) != pcmk_rc_ok)) {
if (expanded_xml) {
free_xml(expanded_xml);
@@ -515,29 +516,27 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (!any_sets) {
- unpack_simple_location(xml_obj, data_set);
+ unpack_simple_location(xml_obj, scheduler);
}
}
/*!
* \internal
- * \brief Add a new location constraint to a cluster working set
+ * \brief Add a new location constraint to scheduler data
*
* \param[in] id XML ID of location constraint
* \param[in,out] rsc Resource in location constraint
- * \param[in] node_weight Constraint score
+ * \param[in] node_score Constraint score
* \param[in] discover_mode Resource discovery option for constraint
* \param[in] node Node in constraint (or NULL if rule-based)
- * \param[in,out] data_set Cluster working set to add constraint to
*
* \return Newly allocated location constraint
- * \note The result will be added to \p data_set and should not be freed
- * separately.
+ * \note The result will be added to the cluster (via \p rsc) and should not be
+ * freed separately.
*/
pe__location_t *
-pcmk__new_location(const char *id, pe_resource_t *rsc,
- int node_weight, const char *discover_mode,
- pe_node_t *node, pe_working_set_t *data_set)
+pcmk__new_location(const char *id, pcmk_resource_t *rsc,
+ int node_score, const char *discover_mode, pcmk_node_t *node)
{
pe__location_t *new_con = NULL;
@@ -550,7 +549,7 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
return NULL;
} else if (node == NULL) {
- CRM_CHECK(node_weight == 0, return NULL);
+ CRM_CHECK(node_score == 0, return NULL);
}
new_con = calloc(1, sizeof(pe__location_t));
@@ -558,17 +557,17 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
new_con->id = strdup(id);
new_con->rsc_lh = rsc;
new_con->node_list_rh = NULL;
- new_con->role_filter = RSC_ROLE_UNKNOWN;
+ new_con->role_filter = pcmk_role_unknown;
if (pcmk__str_eq(discover_mode, "always",
pcmk__str_null_matches|pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_always;
+ new_con->discover_mode = pcmk_probe_always;
} else if (pcmk__str_eq(discover_mode, "never", pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_never;
+ new_con->discover_mode = pcmk_probe_never;
} else if (pcmk__str_eq(discover_mode, "exclusive", pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_exclusive;
+ new_con->discover_mode = pcmk_probe_exclusive;
rsc->exclusive_discover = TRUE;
} else {
@@ -577,14 +576,14 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
}
if (node != NULL) {
- pe_node_t *copy = pe__copy_node(node);
+ pcmk_node_t *copy = pe__copy_node(node);
- copy->weight = node_weight;
+ copy->weight = node_score;
new_con->node_list_rh = g_list_prepend(NULL, copy);
}
- data_set->placement_constraints = g_list_prepend(data_set->placement_constraints,
- new_con);
+ rsc->cluster->placement_constraints = g_list_prepend(
+ rsc->cluster->placement_constraints, new_con);
rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con);
}
@@ -595,12 +594,12 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
* \internal
* \brief Apply all location constraints
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__apply_locations(pe_working_set_t *data_set)
+pcmk__apply_locations(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->placement_constraints;
+ for (GList *iter = scheduler->placement_constraints;
iter != NULL; iter = iter->next) {
pe__location_t *location = iter->data;
@@ -619,14 +618,14 @@ pcmk__apply_locations(pe_working_set_t *data_set)
* apply_location() method should be used instead in most cases.
*/
void
-pcmk__apply_location(pe_resource_t *rsc, pe__location_t *location)
+pcmk__apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
bool need_role = false;
- CRM_CHECK((rsc != NULL) && (location != NULL), return);
+ CRM_ASSERT((rsc != NULL) && (location != NULL));
// If a role was specified, ensure constraint is applicable
- need_role = (location->role_filter > RSC_ROLE_UNKNOWN);
+ need_role = (location->role_filter > pcmk_role_unknown);
if (need_role && (location->role_filter != rsc->next_role)) {
pe_rsc_trace(rsc,
"Not applying %s to %s because role will be %s not %s",
@@ -645,34 +644,33 @@ pcmk__apply_location(pe_resource_t *rsc, pe__location_t *location)
(need_role? " for role " : ""),
(need_role? role2text(location->role_filter) : ""), rsc->id);
- for (GList *gIter = location->node_list_rh; gIter != NULL;
- gIter = gIter->next) {
+ for (GList *iter = location->node_list_rh;
+ iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- pe_node_t *weighted_node = NULL;
+ pcmk_node_t *node = iter->data;
+ pcmk_node_t *allowed_node = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
- weighted_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id);
- if (weighted_node == NULL) {
+ if (allowed_node == NULL) {
pe_rsc_trace(rsc, "* = %d on %s",
node->weight, pe__node_name(node));
- weighted_node = pe__copy_node(node);
+ allowed_node = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes,
- (gpointer) weighted_node->details->id,
- weighted_node);
+ (gpointer) allowed_node->details->id,
+ allowed_node);
} else {
pe_rsc_trace(rsc, "* + %d on %s",
node->weight, pe__node_name(node));
- weighted_node->weight = pcmk__add_scores(weighted_node->weight,
- node->weight);
+ allowed_node->weight = pcmk__add_scores(allowed_node->weight,
+ node->weight);
}
- if (weighted_node->rsc_discover_mode < location->discover_mode) {
- if (location->discover_mode == pe_discover_exclusive) {
+ if (allowed_node->rsc_discover_mode < location->discover_mode) {
+ if (location->discover_mode == pcmk_probe_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
- weighted_node->rsc_discover_mode = location->discover_mode;
+ allowed_node->rsc_discover_mode = location->discover_mode;
}
}
}
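
The pcmk__apply_location() changes above merge each constraint's node score into the resource's allowed-node table via pcmk__add_scores(). A minimal sketch of that kind of saturating score addition follows; demo_add_scores() and the 1000000 bound are local assumptions chosen to mirror Pacemaker's documented score arithmetic (scores clamp at plus/minus "INFINITY", and minus INFINITY wins when both appear), not the library's actual implementation.

    #include <stdio.h>

    #define DEMO_SCORE_INFINITY 1000000  /* assumed "INFINITY" score value */

    /* Illustrative stand-in for pcmk__add_scores(): clamp to +/-INFINITY and
     * let -INFINITY dominate, per the documented score-arithmetic rules.
     */
    static int
    demo_add_scores(int a, int b)
    {
        long sum = 0;

        if ((a == -DEMO_SCORE_INFINITY) || (b == -DEMO_SCORE_INFINITY)) {
            return -DEMO_SCORE_INFINITY;
        }
        if ((a == DEMO_SCORE_INFINITY) || (b == DEMO_SCORE_INFINITY)) {
            return DEMO_SCORE_INFINITY;
        }

        sum = (long) a + (long) b;
        if (sum > DEMO_SCORE_INFINITY) {
            return DEMO_SCORE_INFINITY;
        }
        if (sum < -DEMO_SCORE_INFINITY) {
            return -DEMO_SCORE_INFINITY;
        }
        return (int) sum;
    }

    int
    main(void)
    {
        /* A node already allowed with score 200 gains a +INFINITY location rule */
        printf("%d\n", demo_add_scores(200, DEMO_SCORE_INFINITY));
        /* ...but a -INFINITY constraint bans it regardless of other scores */
        printf("%d\n", demo_add_scores(DEMO_SCORE_INFINITY, -DEMO_SCORE_INFINITY));
        return 0;
    }
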
diff --git a/lib/pacemaker/pcmk_sched_migration.c b/lib/pacemaker/pcmk_sched_migration.c
index 7e6ba8e..5231bf7 100644
--- a/lib/pacemaker/pcmk_sched_migration.c
+++ b/lib/pacemaker/pcmk_sched_migration.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -25,8 +25,8 @@
* \param[in] target Node to add as migration target
*/
static void
-add_migration_meta(pe_action_t *action, const pe_node_t *source,
- const pe_node_t *target)
+add_migration_meta(pcmk_action_t *action, const pcmk_node_t *source,
+ const pcmk_node_t *target)
{
add_hash_param(action->meta, XML_LRM_ATTR_MIGRATE_SOURCE,
source->details->uname);
@@ -43,12 +43,12 @@ add_migration_meta(pe_action_t *action, const pe_node_t *source,
* \param[in] current Node that resource is originally active on
*/
void
-pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
+pcmk__create_migration_actions(pcmk_resource_t *rsc, const pcmk_node_t *current)
{
- pe_action_t *migrate_to = NULL;
- pe_action_t *migrate_from = NULL;
- pe_action_t *start = NULL;
- pe_action_t *stop = NULL;
+ pcmk_action_t *migrate_to = NULL;
+ pcmk_action_t *migrate_from = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
pe_rsc_trace(rsc, "Creating actions to %smigrate %s from %s to %s",
((rsc->partial_migration_target == NULL)? "" : "partially "),
@@ -58,61 +58,68 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
stop = stop_action(rsc, current, TRUE);
if (rsc->partial_migration_target == NULL) {
- migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
- RSC_MIGRATE, current, TRUE, TRUE,
+ migrate_to = custom_action(rsc, pcmk__op_key(rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
+ PCMK_ACTION_MIGRATE_TO, current, TRUE,
rsc->cluster);
}
- migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- RSC_MIGRATED, rsc->allocated_to, TRUE, TRUE,
- rsc->cluster);
+ migrate_from = custom_action(rsc, pcmk__op_key(rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
+ PCMK_ACTION_MIGRATE_FROM, rsc->allocated_to,
+ TRUE, rsc->cluster);
- if ((migrate_from != NULL)
- && ((migrate_to != NULL) || (rsc->partial_migration_target != NULL))) {
+ pe__set_action_flags(start, pcmk_action_migratable);
+ pe__set_action_flags(stop, pcmk_action_migratable);
- pe__set_action_flags(start, pe_action_migrate_runnable);
- pe__set_action_flags(stop, pe_action_migrate_runnable);
+ // This is easier than trying to delete it from the graph
+ pe__set_action_flags(start, pcmk_action_pseudo);
- // This is easier than trying to delete it from the graph
- pe__set_action_flags(start, pe_action_pseudo);
-
- if (rsc->partial_migration_target == NULL) {
- pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
-
- if (migrate_to != NULL) {
- pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
- migrate_to->needs = start->needs;
- }
-
- // Probe -> migrate_to -> migrate_from
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
- NULL, pe_order_optional, rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- NULL,
- pe_order_optional|pe_order_implies_first_migratable,
- rsc->cluster);
- } else {
- pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
- migrate_from->needs = start->needs;
-
- // Probe -> migrate_from (migrate_to already completed)
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- NULL, pe_order_optional, rsc->cluster);
- }
-
- // migrate_from before stop or start
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- pe_order_optional|pe_order_implies_first_migratable,
- rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
+ if (rsc->partial_migration_target == NULL) {
+ pe__set_action_flags(migrate_from, pcmk_action_migratable);
+ pe__set_action_flags(migrate_to, pcmk_action_migratable);
+ migrate_to->needs = start->needs;
+
+ // Probe -> migrate_to -> migrate_from
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ pcmk__ar_ordered|pcmk__ar_unmigratable_then_blocks,
rsc->cluster);
+ } else {
+ pe__set_action_flags(migrate_from, pcmk_action_migratable);
+ migrate_from->needs = start->needs;
+
+ // Probe -> migrate_from (migrate_to already completed)
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
}
+ // migrate_from before stop or start
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
+ pcmk__ar_ordered|pcmk__ar_unmigratable_then_blocks,
+ rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
+ NULL,
+ pcmk__ar_ordered
+ |pcmk__ar_unmigratable_then_blocks
+ |pcmk__ar_first_else_then,
+ rsc->cluster);
+
if (migrate_to != NULL) {
add_migration_meta(migrate_to, current, rsc->allocated_to);
@@ -132,9 +139,7 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
}
}
- if (migrate_from != NULL) {
- add_migration_meta(migrate_from, current, rsc->allocated_to);
- }
+ add_migration_meta(migrate_from, current, rsc->allocated_to);
}
/*!
@@ -147,18 +152,19 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
void
pcmk__abort_dangling_migration(void *data, void *user_data)
{
- const pe_node_t *dangling_source = (const pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ const pcmk_node_t *dangling_source = (const pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
- pe_action_t *stop = NULL;
- bool cleanup = pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop);
+ pcmk_action_t *stop = NULL;
+ bool cleanup = pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_remove_after_stop);
pe_rsc_trace(rsc,
"Scheduling stop%s for %s on %s due to dangling migration",
(cleanup? " and cleanup" : ""), rsc->id,
pe__node_name(dangling_source));
stop = stop_action(rsc, dangling_source, FALSE);
- pe__set_action_flags(stop, pe_action_dangle);
+ pe__set_action_flags(stop, pcmk_action_migration_abort);
if (cleanup) {
pcmk__schedule_cleanup(rsc, dangling_source, false);
}
@@ -174,30 +180,30 @@ pcmk__abort_dangling_migration(void *data, void *user_data)
* \return true if \p rsc can migrate, otherwise false
*/
bool
-pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current)
+pcmk__rsc_can_migrate(const pcmk_resource_t *rsc, const pcmk_node_t *current)
{
CRM_CHECK(rsc != NULL, return false);
- if (!pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_migratable)) {
pe_rsc_trace(rsc, "%s cannot migrate because "
"the configuration does not allow it",
rsc->id);
return false;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "%s cannot migrate because it is not managed",
rsc->id);
return false;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "%s cannot migrate because it is failed",
rsc->id);
return false;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s cannot migrate because it has a start pending",
rsc->id);
return false;
@@ -230,7 +236,7 @@ pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current)
* \return Newly allocated copy of action name (or NULL if none available)
*/
static char *
-task_from_action_or_key(const pe_action_t *action, const char *key)
+task_from_action_or_key(const pcmk_action_t *action, const char *key)
{
char *res = NULL;
@@ -270,8 +276,8 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
}
// Only orderings involving at least one migratable resource are relevant
- first_migratable = pcmk_is_set(order->lh_rsc->flags, pe_rsc_allow_migrate);
- then_migratable = pcmk_is_set(order->rh_rsc->flags, pe_rsc_allow_migrate);
+ first_migratable = pcmk_is_set(order->lh_rsc->flags, pcmk_rsc_migratable);
+ then_migratable = pcmk_is_set(order->rh_rsc->flags, pcmk_rsc_migratable);
if (!first_migratable && !then_migratable) {
return;
}
@@ -282,24 +288,26 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
then_task = task_from_action_or_key(order->rh_action,
order->rh_action_task);
- if (pcmk__str_eq(first_task, RSC_START, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_START, pcmk__str_none)) {
+ if (pcmk__str_eq(first_task, PCMK_ACTION_START, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_START, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (first_migratable && then_migratable) {
/* A start then B start
* -> A migrate_from then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
if (then_migratable) {
if (first_migratable) {
- pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
+ pe__set_order_flags(flags, pcmk__ar_if_first_unmigratable);
}
/* A start then B start
@@ -307,75 +315,87 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
* migration)
*/
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_START, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_START, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
} else if (then_migratable
- && pcmk__str_eq(first_task, RSC_STOP, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_STOP, pcmk__str_none)) {
+ && pcmk__str_eq(first_task, PCMK_ACTION_STOP, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_STOP, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (first_migratable) {
- pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
+ pe__set_order_flags(flags, pcmk__ar_if_first_unmigratable);
}
/* For an ordering "stop A then stop B", if A is moving via restart, and
* B is migrating, enforce that B's migrate_to occurs after A's stop.
*/
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0), NULL,
+ pcmk__op_key(order->lh_rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
// Also order B's migrate_from after A's stop during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0),
+ pcmk__op_key(order->lh_rsc->id, PCMK_ACTION_STOP,
+ 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, flags, order->lh_rsc->cluster);
}
- } else if (pcmk__str_eq(first_task, RSC_PROMOTE, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_START, pcmk__str_none)) {
+ } else if (pcmk__str_eq(first_task, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_START, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (then_migratable) {
/* A promote then B start
* -> A promote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_PROMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_PROMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
- } else if (pcmk__str_eq(first_task, RSC_DEMOTE, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_STOP, pcmk__str_none)) {
+ } else if (pcmk__str_eq(first_task, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_STOP, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (then_migratable) {
/* A demote then B stop
* -> A demote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_DEMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
- // Also order B migrate_from after A demote during partial migrations
+ // Order B migrate_from after A demote during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_DEMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, flags, order->lh_rsc->cluster);
}
}
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index d7d5ba4..9cf5545 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -9,7 +9,6 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
-#include <crm/lrmd.h> // lrmd_event_data_t
#include <crm/common/xml_internal.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
@@ -28,7 +27,7 @@
* or maintenance mode, otherwise false
*/
bool
-pcmk__node_available(const pe_node_t *node, bool consider_score,
+pcmk__node_available(const pcmk_node_t *node, bool consider_score,
bool consider_guest)
{
if ((node == NULL) || (node->details == NULL) || !node->details->online
@@ -43,7 +42,7 @@ pcmk__node_available(const pe_node_t *node, bool consider_score,
// @TODO Go through all callers to see which should set consider_guest
if (consider_guest && pe__is_guest_node(node)) {
- pe_resource_t *guest = node->details->remote_rsc->container;
+ pcmk_resource_t *guest = node->details->remote_rsc->container;
if (guest->fns->location(guest, NULL, FALSE) == NULL) {
return false;
@@ -66,7 +65,7 @@ pcmk__copy_node_table(GHashTable *nodes)
{
GHashTable *new_table = NULL;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
if (nodes == NULL) {
return NULL;
@@ -74,7 +73,7 @@ pcmk__copy_node_table(GHashTable *nodes)
new_table = pcmk__strkey_table(NULL, free);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
- pe_node_t *new_node = pe__copy_node(node);
+ pcmk_node_t *new_node = pe__copy_node(node);
g_hash_table_insert(new_table, (gpointer) new_node->details->id,
new_node);
@@ -84,6 +83,82 @@ pcmk__copy_node_table(GHashTable *nodes)
/*!
* \internal
+ * \brief Free a table of node tables
+ *
+ * \param[in,out] data Table to free
+ *
+ * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy().
+ */
+static void
+destroy_node_tables(gpointer data)
+{
+ g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \internal
+ * \brief Recursively copy the node tables of a resource
+ *
+ * Build a hash table containing copies of the allowed nodes tables of \p rsc
+ * and its entire tree of descendants. The key is the resource ID, and the value
+ * is a copy of the resource's node table.
+ *
+ * \param[in] rsc Resource whose node table to copy
+ * \param[in,out] copy Where to store the copied node tables
+ *
+ * \note \p *copy should be \c NULL for the top-level call.
+ * \note The caller is responsible for freeing \p copy using
+ * \c g_hash_table_destroy().
+ */
+void
+pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy)
+{
+ CRM_ASSERT((rsc != NULL) && (copy != NULL));
+
+ if (*copy == NULL) {
+ *copy = pcmk__strkey_table(NULL, destroy_node_tables);
+ }
+
+ g_hash_table_insert(*copy, rsc->id,
+ pcmk__copy_node_table(rsc->allowed_nodes));
+
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__copy_node_tables((const pcmk_resource_t *) iter->data, copy);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Recursively restore the node tables of a resource from backup
+ *
+ * Given a hash table containing backup copies of the allowed nodes tables of
+ * \p rsc and its entire tree of descendants, replace the resources' current
+ * node tables with the backed-up copies.
+ *
+ * \param[in,out] rsc Resource whose node tables to restore
+ * \param[in] backup Table of backup node tables (created by
+ * \c pcmk__copy_node_tables())
+ *
+ * \note This function frees the resources' current node tables.
+ */
+void
+pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup)
+{
+ CRM_ASSERT((rsc != NULL) && (backup != NULL));
+
+ g_hash_table_destroy(rsc->allowed_nodes);
+
+ // Copy to avoid danger with multiple restores
+ rsc->allowed_nodes = g_hash_table_lookup(backup, rsc->id);
+ rsc->allowed_nodes = pcmk__copy_node_table(rsc->allowed_nodes);
+
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__restore_node_tables((pcmk_resource_t *) iter->data, backup);
+ }
+}
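
These two helpers pair up for trial-and-rollback assignment: snapshot the allowed-nodes tables of a whole resource tree, attempt a change that may rewrite node scores, and roll back if it does not work out. A minimal sketch of that pattern follows; rsc is assumed to be an already-unpacked pcmk_resource_t, and try_assignment() is a hypothetical step that may modify the tables.

/* Sketch only: try_assignment() is hypothetical; the copy/restore helpers
 * and g_hash_table_destroy() are the calls documented above.
 */
static void
trial_assignment(pcmk_resource_t *rsc)
{
    GHashTable *backup = NULL;              // must be NULL for top-level call

    pcmk__copy_node_tables(rsc, &backup);   // back up rsc and all descendants

    if (!try_assignment(rsc)) {             // hypothetical trial that failed
        pcmk__restore_node_tables(rsc, backup);
    }
    g_hash_table_destroy(backup);           // caller frees the backup table
}
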
+
+/*!
+ * \internal
* \brief Copy a list of node objects
*
* \param[in] list List to copy
@@ -96,9 +171,9 @@ pcmk__copy_node_list(const GList *list, bool reset)
{
GList *result = NULL;
- for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- pe_node_t *new_node = NULL;
- pe_node_t *this_node = (pe_node_t *) gIter->data;
+ for (const GList *iter = list; iter != NULL; iter = iter->next) {
+ pcmk_node_t *new_node = NULL;
+ pcmk_node_t *this_node = iter->data;
new_node = pe__copy_node(this_node);
if (reset) {
@@ -111,14 +186,14 @@ pcmk__copy_node_list(const GList *list, bool reset)
/*!
* \internal
- * \brief Compare two nodes for allocation desirability
+ * \brief Compare two nodes for assignment preference
*
- * Given two nodes, check which one is more preferred by allocation criteria
- * such as node weight and utilization.
+ * Given two nodes, check which one is more preferred by assignment criteria
+ * such as node score and utilization.
*
* \param[in] a First node to compare
* \param[in] b Second node to compare
- * \param[in] data Node that resource being assigned is active on, if any
+ * \param[in] data Node to prefer if all else equal
*
* \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
* equally preferred
@@ -126,12 +201,12 @@ pcmk__copy_node_list(const GList *list, bool reset)
static gint
compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
{
- const pe_node_t *node1 = (const pe_node_t *) a;
- const pe_node_t *node2 = (const pe_node_t *) b;
- const pe_node_t *active = (const pe_node_t *) data;
+ const pcmk_node_t *node1 = (const pcmk_node_t *) a;
+ const pcmk_node_t *node2 = (const pcmk_node_t *) b;
+ const pcmk_node_t *preferred = (const pcmk_node_t *) data;
- int node1_weight = 0;
- int node2_weight = 0;
+ int node1_score = -INFINITY;
+ int node2_score = -INFINITY;
int result = 0;
@@ -142,29 +217,29 @@ compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
return -1;
}
- // Compare node weights
+ // Compare node scores
- node1_weight = pcmk__node_available(node1, false, false)? node1->weight : -INFINITY;
- node2_weight = pcmk__node_available(node2, false, false)? node2->weight : -INFINITY;
+ if (pcmk__node_available(node1, false, false)) {
+ node1_score = node1->weight;
+ }
+ if (pcmk__node_available(node2, false, false)) {
+ node2_score = node2->weight;
+ }
- if (node1_weight > node2_weight) {
- crm_trace("%s (%d) > %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
+ if (node1_score > node2_score) {
+ crm_trace("%s before %s (score %d > %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1_score, node2_score);
return -1;
}
- if (node1_weight < node2_weight) {
- crm_trace("%s (%d) < %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
+ if (node1_score < node2_score) {
+ crm_trace("%s after %s (score %d < %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1_score, node2_score);
return 1;
}
- crm_trace("%s (%d) == %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
-
// If appropriate, compare node utilization
if (pcmk__str_eq(node1->details->data_set->placement_strategy, "minimal",
@@ -176,56 +251,65 @@ compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
pcmk__str_casei)) {
result = pcmk__compare_node_capacities(node1, node2);
if (result < 0) {
- crm_trace("%s > %s : capacity (%d)",
- pe__node_name(node1), pe__node_name(node2), result);
+ crm_trace("%s before %s (greater capacity by %d attributes)",
+ pe__node_name(node1), pe__node_name(node2), result * -1);
return -1;
} else if (result > 0) {
- crm_trace("%s < %s : capacity (%d)",
+ crm_trace("%s after %s (lower capacity by %d attributes)",
pe__node_name(node1), pe__node_name(node2), result);
return 1;
}
}
- // Compare number of allocated resources
+ // Compare number of resources already assigned to node
if (node1->details->num_resources < node2->details->num_resources) {
- crm_trace("%s (%d) > %s (%d) : resources",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ crm_trace("%s before %s (%d resources < %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1->details->num_resources, node2->details->num_resources);
return -1;
} else if (node1->details->num_resources > node2->details->num_resources) {
- crm_trace("%s (%d) < %s (%d) : resources",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ crm_trace("%s after %s (%d resources > %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1->details->num_resources, node2->details->num_resources);
return 1;
}
// Check whether one node is already running desired resource
- if (active != NULL) {
- if (active->details == node1->details) {
- crm_trace("%s (%d) > %s (%d) : active",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ if (preferred != NULL) {
+ if (pe__same_node(preferred, node1)) {
+ crm_trace("%s before %s (preferred node)",
+ pe__node_name(node1), pe__node_name(node2));
return -1;
- } else if (active->details == node2->details) {
- crm_trace("%s (%d) < %s (%d) : active",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ } else if (pe__same_node(preferred, node2)) {
+ crm_trace("%s after %s (not preferred node)",
+ pe__node_name(node1), pe__node_name(node2));
return 1;
}
}
// If all else is equal, prefer node with lowest-sorting name
equal:
- crm_trace("%s = %s", pe__node_name(node1), pe__node_name(node2));
- return strcmp(node1->details->uname, node2->details->uname);
+ result = strcmp(node1->details->uname, node2->details->uname);
+ if (result < 0) {
+ crm_trace("%s before %s (name)",
+ pe__node_name(node1), pe__node_name(node2));
+ return -1;
+ } else if (result > 0) {
+ crm_trace("%s after %s (name)",
+ pe__node_name(node1), pe__node_name(node2));
+ return 1;
+ }
+
+ crm_trace("%s == %s", pe__node_name(node1), pe__node_name(node2));
+ return 0;
}
/*!
* \internal
- * \brief Sort a list of nodes by allocation desirability
+ * \brief Sort a list of nodes by assignment preference
*
* \param[in,out] nodes Node list to sort
* \param[in] active_node Node where resource being assigned is active
@@ -233,7 +317,7 @@ equal:
* \return New head of sorted list
*/
GList *
-pcmk__sort_nodes(GList *nodes, pe_node_t *active_node)
+pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node)
{
return g_list_sort_with_data(nodes, compare_nodes, active_node);
}
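
For context, a sketch of how the comparator is typically exercised: build a list of candidate node copies, sort it best-first, and take the head. Here scheduler and preferred_node are assumed to exist; only pcmk__copy_node_list(), pcmk__sort_nodes(), and pe__node_name() are taken from this code.

// Sketch only: sort candidate nodes by the criteria implemented above
GList *candidates = pcmk__copy_node_list(scheduler->nodes, false);

candidates = pcmk__sort_nodes(candidates, preferred_node);
if (candidates != NULL) {
    pcmk_node_t *best = candidates->data;   // most preferred candidate

    crm_trace("Best candidate is %s", pe__node_name(best));
}
// Free the node copies (the shared details structs are not duplicated)
g_list_free_full(candidates, free);
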
@@ -251,7 +335,7 @@ bool
pcmk__any_node_available(GHashTable *nodes)
{
GHashTableIter iter;
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
if (nodes == NULL) {
return false;
@@ -269,14 +353,14 @@ pcmk__any_node_available(GHashTable *nodes)
* \internal
* \brief Apply node health values for all nodes in cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__apply_node_health(pe_working_set_t *data_set)
+pcmk__apply_node_health(pcmk_scheduler_t *scheduler)
{
int base_health = 0;
enum pcmk__health_strategy strategy;
- const char *strategy_str = pe_pref(data_set->config_hash,
+ const char *strategy_str = pe_pref(scheduler->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY);
strategy = pcmk__parse_health_strategy(strategy_str);
@@ -287,11 +371,11 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
// The progressive strategy can use a base health score
if (strategy == pcmk__health_strategy_progressive) {
- base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, data_set);
+ base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, scheduler);
}
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
int health = pe__sum_node_health_scores(node, base_health);
// An overall health score of 0 has no effect
@@ -302,8 +386,8 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
pe__node_name(node), health);
// Use node health as a location score for each resource on the node
- for (GList *r = data_set->resources; r != NULL; r = r->next) {
- pe_resource_t *rsc = (pe_resource_t *) r->data;
+ for (GList *r = scheduler->resources; r != NULL; r = r->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) r->data;
bool constrain = true;
@@ -315,8 +399,7 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
PCMK__META_ALLOW_UNHEALTHY_NODES));
}
if (constrain) {
- pcmk__new_location(strategy_str, rsc, health, NULL, node,
- data_set);
+ pcmk__new_location(strategy_str, rsc, health, NULL, node);
} else {
pe_rsc_trace(rsc, "%s is immune from health ban on %s",
rsc->id, pe__node_name(node));
@@ -335,8 +418,8 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
* \return Equivalent of \p node from \p rsc's parent's allowed nodes if any,
* otherwise NULL
*/
-pe_node_t *
-pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
+pcmk_node_t *
+pcmk__top_allowed_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
GHashTable *allowed_nodes = NULL;
@@ -347,5 +430,5 @@ pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
} else {
allowed_nodes = rsc->parent->allowed_nodes;
}
- return pe_hash_table_lookup(allowed_nodes, node->details->id);
+ return g_hash_table_lookup(allowed_nodes, node->details->id);
}
diff --git a/lib/pacemaker/pcmk_sched_ordering.c b/lib/pacemaker/pcmk_sched_ordering.c
index 6629999..e589692 100644
--- a/lib/pacemaker/pcmk_sched_ordering.c
+++ b/lib/pacemaker/pcmk_sched_ordering.c
@@ -29,40 +29,41 @@ enum ordering_symmetry {
ordering_symmetric_inverse, // the inverse relation in a symmetric ordering
};
-#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
- __rsc = pcmk__find_constraint_resource(data_set->resources, __name); \
- if (__rsc == NULL) { \
- pcmk__config_err("%s: No resource found for %s", __set, __name); \
- return pcmk_rc_unpack_error; \
- } \
+#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
+ __rsc = pcmk__find_constraint_resource(scheduler->resources, \
+ __name); \
+ if (__rsc == NULL) { \
+ pcmk__config_err("%s: No resource found for %s", __set, __name);\
+ return pcmk_rc_unpack_error; \
+ } \
} while (0)
static const char *
invert_action(const char *action)
{
- if (pcmk__str_eq(action, RSC_START, pcmk__str_casei)) {
- return RSC_STOP;
+ if (pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
+ return PCMK_ACTION_STOP;
- } else if (pcmk__str_eq(action, RSC_STOP, pcmk__str_casei)) {
- return RSC_START;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
+ return PCMK_ACTION_START;
- } else if (pcmk__str_eq(action, RSC_PROMOTE, pcmk__str_casei)) {
- return RSC_DEMOTE;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ return PCMK_ACTION_DEMOTE;
- } else if (pcmk__str_eq(action, RSC_DEMOTE, pcmk__str_casei)) {
- return RSC_PROMOTE;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
+ return PCMK_ACTION_PROMOTE;
- } else if (pcmk__str_eq(action, RSC_PROMOTED, pcmk__str_casei)) {
- return RSC_DEMOTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_PROMOTED, pcmk__str_none)) {
+ return PCMK_ACTION_DEMOTED;
- } else if (pcmk__str_eq(action, RSC_DEMOTED, pcmk__str_casei)) {
- return RSC_PROMOTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_DEMOTED, pcmk__str_none)) {
+ return PCMK_ACTION_PROMOTED;
- } else if (pcmk__str_eq(action, RSC_STARTED, pcmk__str_casei)) {
- return RSC_STOPPED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_RUNNING, pcmk__str_none)) {
+ return PCMK_ACTION_STOPPED;
- } else if (pcmk__str_eq(action, RSC_STOPPED, pcmk__str_casei)) {
- return RSC_STARTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOPPED, pcmk__str_none)) {
+ return PCMK_ACTION_RUNNING;
}
crm_warn("Unknown action '%s' specified in order constraint", action);
return NULL;
@@ -86,19 +87,19 @@ get_ordering_type(const xmlNode *xml_obj)
if (score_i == 0) {
kind_e = pe_order_kind_optional;
}
- pe_warn_once(pe_wo_order_score,
+ pe_warn_once(pcmk__wo_order_score,
"Support for 'score' in rsc_order is deprecated "
"and will be removed in a future release "
"(use 'kind' instead)");
}
- } else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_none)) {
kind_e = pe_order_kind_mandatory;
- } else if (pcmk__str_eq(kind, "Optional", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Optional", pcmk__str_none)) {
kind_e = pe_order_kind_optional;
- } else if (pcmk__str_eq(kind, "Serialize", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Serialize", pcmk__str_none)) {
kind_e = pe_order_kind_serialize;
} else {
@@ -177,34 +178,39 @@ static uint32_t
ordering_flags_for_kind(enum pe_order_kind kind, const char *first,
enum ordering_symmetry symmetry)
{
- uint32_t flags = pe_order_none; // so we trace-log all flags set
-
- pe__set_order_flags(flags, pe_order_optional);
+ uint32_t flags = pcmk__ar_none; // so we trace-log all flags set
switch (kind) {
case pe_order_kind_optional:
+ pe__set_order_flags(flags, pcmk__ar_ordered);
break;
case pe_order_kind_serialize:
- pe__set_order_flags(flags, pe_order_serialize_only);
+ /* This flag is not used anywhere directly but means the relation
+ * will not match an equality comparison against pcmk__ar_none or
+ * pcmk__ar_ordered.
+ */
+ pe__set_order_flags(flags, pcmk__ar_serialize);
break;
case pe_order_kind_mandatory:
+ pe__set_order_flags(flags, pcmk__ar_ordered);
switch (symmetry) {
case ordering_asymmetric:
- pe__set_order_flags(flags, pe_order_asymmetrical);
+ pe__set_order_flags(flags, pcmk__ar_asymmetric);
break;
case ordering_symmetric:
- pe__set_order_flags(flags, pe_order_implies_then);
- if (pcmk__strcase_any_of(first, RSC_START, RSC_PROMOTE,
- NULL)) {
- pe__set_order_flags(flags, pe_order_runnable_left);
+ pe__set_order_flags(flags, pcmk__ar_first_implies_then);
+ if (pcmk__strcase_any_of(first, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
+ pe__set_order_flags(flags,
+ pcmk__ar_unrunnable_first_blocks);
}
break;
case ordering_symmetric_inverse:
- pe__set_order_flags(flags, pe_order_implies_first);
+ pe__set_order_flags(flags, pcmk__ar_then_implies_first);
break;
}
break;
@@ -221,17 +227,17 @@ ordering_flags_for_kind(enum pe_order_kind kind, const char *first,
* \param[in] instance_attr XML attribute name for instance number.
* This option is deprecated and will be removed in a
* future release.
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Resource corresponding to \p id, or NULL if none
*/
-static pe_resource_t *
+static pcmk_resource_t *
get_ordering_resource(const xmlNode *xml, const char *resource_attr,
const char *instance_attr,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
// @COMPAT: instance_attr and instance_id variables deprecated since 2.1.5
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(xml, resource_attr);
const char *instance_id = crm_element_value(xml, instance_attr);
@@ -241,7 +247,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
return NULL;
}
- rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", ID(xml), rsc_id);
@@ -249,7 +255,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
}
if (instance_id != NULL) {
- pe_warn_once(pe_wo_order_inst,
+ pe_warn_once(pcmk__wo_order_inst,
"Support for " XML_ORDER_ATTR_FIRST_INSTANCE " and "
XML_ORDER_ATTR_THEN_INSTANCE " is deprecated and will be "
"removed in a future release.");
@@ -281,7 +287,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
* \return Minimum 'first' instances required (or 0 if not applicable)
*/
static int
-get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
+get_minimum_first_instances(const pcmk_resource_t *rsc, const xmlNode *xml)
{
const char *clone_min = NULL;
bool require_all = false;
@@ -290,8 +296,7 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
return 0;
}
- clone_min = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_INCARNATION_MIN);
+ clone_min = g_hash_table_lookup(rsc->meta, PCMK_META_CLONE_MIN);
if (clone_min != NULL) {
int clone_min_int = 0;
@@ -303,7 +308,7 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
* require-all=false is deprecated equivalent of clone-min=1
*/
if (pcmk__xe_get_bool_attr(xml, "require-all", &require_all) != ENODATA) {
- pe_warn_once(pe_wo_require_all,
+ pe_warn_once(pcmk__wo_require_all,
"Support for require-all in ordering constraints "
"is deprecated and will be removed in a future release"
" (use clone-min clone meta-attribute instead)");
@@ -326,17 +331,16 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
* \param[in] action_then 'Then' action in ordering
* \param[in] flags Ordering flags
* \param[in] clone_min Minimum required instances of 'first'
- * \param[in,out] data_set Cluster working set
*/
static void
clone_min_ordering(const char *id,
- pe_resource_t *rsc_first, const char *action_first,
- pe_resource_t *rsc_then, const char *action_then,
- uint32_t flags, int clone_min, pe_working_set_t *data_set)
+ pcmk_resource_t *rsc_first, const char *action_first,
+ pcmk_resource_t *rsc_then, const char *action_then,
+ uint32_t flags, int clone_min)
{
// Create a pseudo-action for when the minimum instances are active
- char *task = crm_strdup_printf(CRM_OP_RELAXED_CLONE ":%s", id);
- pe_action_t *clone_min_met = get_pseudo_op(task, data_set);
+ char *task = crm_strdup_printf(PCMK_ACTION_CLONE_ONE_OR_MORE ":%s", id);
+ pcmk_action_t *clone_min_met = get_pseudo_op(task, rsc_first->cluster);
free(task);
@@ -344,24 +348,24 @@ clone_min_ordering(const char *id,
* considered runnable before allowing the pseudo-action to be runnable.
*/
clone_min_met->required_runnable_before = clone_min;
- pe__set_action_flags(clone_min_met, pe_action_requires_any);
+ pe__set_action_flags(clone_min_met, pcmk_action_min_runnable);
// Order the actions for each clone instance before the pseudo-action
- for (GList *rIter = rsc_first->children; rIter != NULL;
- rIter = rIter->next) {
-
- pe_resource_t *child = rIter->data;
+ for (GList *iter = rsc_first->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child = iter->data;
pcmk__new_ordering(child, pcmk__op_key(child->id, action_first, 0),
NULL, NULL, NULL, clone_min_met,
- pe_order_one_or_more|pe_order_implies_then_printed,
- data_set);
+ pcmk__ar_min_runnable
+ |pcmk__ar_first_implies_then_graphed,
+ rsc_first->cluster);
}
// Order "then" action after the pseudo-action (if runnable)
pcmk__new_ordering(NULL, NULL, clone_min_met, rsc_then,
pcmk__op_key(rsc_then->id, action_then, 0),
- NULL, flags|pe_order_runnable_left, data_set);
+ NULL, flags|pcmk__ar_unrunnable_first_blocks,
+ rsc_first->cluster);
}
/*!
@@ -397,8 +401,8 @@ clone_min_ordering(const char *id,
*/
static void
inverse_ordering(const char *id, enum pe_order_kind kind,
- pe_resource_t *rsc_first, const char *action_first,
- pe_resource_t *rsc_then, const char *action_then)
+ pcmk_resource_t *rsc_first, const char *action_first,
+ pcmk_resource_t *rsc_then, const char *action_then)
{
action_then = invert_action(action_then);
action_first = invert_action(action_first);
@@ -409,20 +413,20 @@ inverse_ordering(const char *id, enum pe_order_kind kind,
uint32_t flags = ordering_flags_for_kind(kind, action_first,
ordering_symmetric_inverse);
- handle_restart_type(rsc_then, kind, pe_order_implies_first, flags);
+ handle_restart_type(rsc_then, kind, pcmk__ar_then_implies_first, flags);
pcmk__order_resource_actions(rsc_then, action_then, rsc_first,
action_first, flags);
}
}
static void
-unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_rsc_order(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc_then = NULL;
- pe_resource_t *rsc_first = NULL;
+ pcmk_resource_t *rsc_then = NULL;
+ pcmk_resource_t *rsc_first = NULL;
int min_required_before = 0;
enum pe_order_kind kind = pe_order_kind_mandatory;
- uint32_t cons_weight = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
enum ordering_symmetry symmetry;
const char *action_then = NULL;
@@ -434,27 +438,27 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
rsc_first = get_ordering_resource(xml_obj, XML_ORDER_ATTR_FIRST,
XML_ORDER_ATTR_FIRST_INSTANCE,
- data_set);
+ scheduler);
if (rsc_first == NULL) {
return;
}
rsc_then = get_ordering_resource(xml_obj, XML_ORDER_ATTR_THEN,
XML_ORDER_ATTR_THEN_INSTANCE,
- data_set);
+ scheduler);
if (rsc_then == NULL) {
return;
}
action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
if (action_first == NULL) {
- action_first = RSC_START;
+ action_first = PCMK_ACTION_START;
}
action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
@@ -465,9 +469,9 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
kind = get_ordering_type(xml_obj);
symmetry = get_ordering_symmetry(xml_obj, kind, NULL);
- cons_weight = ordering_flags_for_kind(kind, action_first, symmetry);
+ flags = ordering_flags_for_kind(kind, action_first, symmetry);
- handle_restart_type(rsc_then, kind, pe_order_implies_then, cons_weight);
+ handle_restart_type(rsc_then, kind, pcmk__ar_first_implies_then, flags);
/* If there is a minimum number of instances that must be runnable before
* the 'then' action is runnable, we use a pseudo-action for convenience:
@@ -477,10 +481,10 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
min_required_before = get_minimum_first_instances(rsc_first, xml_obj);
if (min_required_before > 0) {
clone_min_ordering(id, rsc_first, action_first, rsc_then, action_then,
- cons_weight, min_required_before, data_set);
+ flags, min_required_before);
} else {
pcmk__order_resource_actions(rsc_first, action_first, rsc_then,
- action_then, cons_weight);
+ action_then, flags);
}
if (symmetry == ordering_symmetric) {
@@ -511,17 +515,17 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in] then_action 'then' action (if NULL, \p then_rsc and
* \p then_action_task must be set)
*
- * \param[in] flags Flag set of enum pe_ordering
- * \param[in,out] data_set Cluster working set to add ordering to
+ * \param[in] flags Group of enum pcmk__action_relation_flags
+ * \param[in,out] sched Scheduler data to add ordering to
*
* \note This function takes ownership of first_action_task and
* then_action_task, which do not need to be freed by the caller.
*/
void
-pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
- pe_action_t *first_action, pe_resource_t *then_rsc,
- char *then_action_task, pe_action_t *then_action,
- uint32_t flags, pe_working_set_t *data_set)
+pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_action_task,
+ pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ char *then_action_task, pcmk_action_t *then_action,
+ uint32_t flags, pcmk_scheduler_t *sched)
{
pe__ordering_t *order = NULL;
@@ -540,7 +544,7 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
order = calloc(1, sizeof(pe__ordering_t));
CRM_ASSERT(order != NULL);
- order->id = data_set->order_id++;
+ order->id = sched->order_id++;
order->flags = flags;
order->lh_rsc = first_rsc;
order->rh_rsc = then_rsc;
@@ -566,12 +570,12 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
}
pe_rsc_trace(first_rsc, "Created ordering %d for %s then %s",
- (data_set->order_id - 1),
+ (sched->order_id - 1),
pcmk__s(order->lh_action_task, "an underspecified action"),
pcmk__s(order->rh_action_task, "an underspecified action"));
- data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints,
- order);
+ sched->ordering_constraints = g_list_prepend(sched->ordering_constraints,
+ order);
pcmk__order_migration_equivalents(order);
}
@@ -581,23 +585,23 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
* \param[in] set Set XML to unpack
* \param[in] parent_kind rsc_order XML "kind" attribute
* \param[in] parent_symmetrical_s rsc_order XML "symmetrical" attribute
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
*/
static int
unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
- const char *parent_symmetrical_s, pe_working_set_t *data_set)
+ const char *parent_symmetrical_s, pcmk_scheduler_t *scheduler)
{
GList *set_iter = NULL;
GList *resources = NULL;
- pe_resource_t *last = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *last = NULL;
+ pcmk_resource_t *resource = NULL;
int local_kind = parent_kind;
bool sequential = false;
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
enum ordering_symmetry symmetry;
char *key = NULL;
@@ -607,7 +611,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND);
if (action == NULL) {
- action = RSC_START;
+ action = PCMK_ACTION_START;
}
if (kind_s) {
@@ -636,7 +640,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
set_iter = resources;
while (set_iter != NULL) {
- resource = (pe_resource_t *) set_iter->data;
+ resource = (pcmk_resource_t *) set_iter->data;
set_iter = set_iter->next;
key = pcmk__op_key(resource->id, action, 0);
@@ -644,12 +648,12 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
if (local_kind == pe_order_kind_serialize) {
/* Serialize before everything that comes after */
- for (GList *gIter = set_iter; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *then_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = set_iter; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *then_rsc = iter->data;
char *then_key = pcmk__op_key(then_rsc->id, action, 0);
pcmk__new_ordering(resource, strdup(key), NULL, then_rsc,
- then_key, NULL, flags, data_set);
+ then_key, NULL, flags, scheduler);
}
} else if (sequential) {
@@ -674,7 +678,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
set_iter = resources;
while (set_iter != NULL) {
- resource = (pe_resource_t *) set_iter->data;
+ resource = (pcmk_resource_t *) set_iter->data;
set_iter = set_iter->next;
if (sequential) {
@@ -694,42 +698,42 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
/*!
* \brief Order two resource sets relative to each other
*
- * \param[in] id Ordering ID (for logging)
- * \param[in] set1 First listed set
- * \param[in] set2 Second listed set
- * \param[in] kind Ordering kind
- * \param[in,out] data_set Cluster working set
- * \param[in] symmetry Which ordering symmetry applies to this relation
+ * \param[in] id Ordering ID (for logging)
+ * \param[in] set1 First listed set
+ * \param[in] set2 Second listed set
+ * \param[in] kind Ordering kind
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] symmetry Which ordering symmetry applies to this relation
*
* \return Standard Pacemaker return code
*/
static int
order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
- enum pe_order_kind kind, pe_working_set_t *data_set,
+ enum pe_order_kind kind, pcmk_scheduler_t *scheduler,
enum ordering_symmetry symmetry)
{
const xmlNode *xml_rsc = NULL;
const xmlNode *xml_rsc_2 = NULL;
- pe_resource_t *rsc_1 = NULL;
- pe_resource_t *rsc_2 = NULL;
+ pcmk_resource_t *rsc_1 = NULL;
+ pcmk_resource_t *rsc_2 = NULL;
const char *action_1 = crm_element_value(set1, "action");
const char *action_2 = crm_element_value(set2, "action");
- uint32_t flags = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
bool require_all = true;
(void) pcmk__xe_get_bool_attr(set1, "require-all", &require_all);
if (action_1 == NULL) {
- action_1 = RSC_START;
+ action_1 = PCMK_ACTION_START;
}
if (action_2 == NULL) {
- action_2 = RSC_START;
+ action_2 = PCMK_ACTION_START;
}
if (symmetry == ordering_symmetric_inverse) {
@@ -737,8 +741,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
action_2 = invert_action(action_2);
}
- if (pcmk__str_eq(RSC_STOP, action_1, pcmk__str_casei)
- || pcmk__str_eq(RSC_DEMOTE, action_1, pcmk__str_casei)) {
+ if (pcmk__str_eq(PCMK_ACTION_STOP, action_1, pcmk__str_none)
+ || pcmk__str_eq(PCMK_ACTION_DEMOTE, action_1, pcmk__str_none)) {
/* Assuming: A -> ( B || C) -> D
* The one-or-more logic only applies during the start/promote phase.
     * During shutdown neither B nor C can shut down until D is down, so simply
@@ -753,11 +757,11 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
* irrelevant in regards to set2.
*/
if (!require_all) {
- char *task = crm_strdup_printf(CRM_OP_RELAXED_SET ":%s", ID(set1));
- pe_action_t *unordered_action = get_pseudo_op(task, data_set);
+ char *task = crm_strdup_printf(PCMK_ACTION_ONE_OR_MORE ":%s", ID(set1));
+ pcmk_action_t *unordered_action = get_pseudo_op(task, scheduler);
free(task);
- pe__set_action_flags(unordered_action, pe_action_requires_any);
+ pe__set_action_flags(unordered_action, pcmk_action_min_runnable);
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
@@ -770,8 +774,9 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*/
pcmk__new_ordering(rsc_1, pcmk__op_key(rsc_1->id, action_1, 0),
NULL, NULL, NULL, unordered_action,
- pe_order_one_or_more|pe_order_implies_then_printed,
- data_set);
+ pcmk__ar_min_runnable
+ |pcmk__ar_first_implies_then_graphed,
+ scheduler);
}
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
@@ -784,7 +789,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*/
pcmk__new_ordering(NULL, NULL, unordered_action,
rsc_2, pcmk__op_key(rsc_2->id, action_2, 0),
- NULL, flags|pe_order_runnable_left, data_set);
+ NULL, flags|pcmk__ar_unrunnable_first_blocks,
+ scheduler);
}
return pcmk_rc_ok;
@@ -859,7 +865,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
- for (xmlNode *xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
+ for (xmlNode *xml_rsc_2 = first_named_child(set2,
+ XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
@@ -878,31 +885,31 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*
* \param[in,out] xml_obj Ordering constraint XML
* \param[out] expanded_xml Equivalent XML with tags expanded
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically, pcmk_rc_ok on success,
* and pcmk_rc_unpack_error on invalid configuration)
*/
static int
unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
const char *id_first = NULL;
const char *id_then = NULL;
const char *action_first = NULL;
const char *action_then = NULL;
- pe_resource_t *rsc_first = NULL;
- pe_resource_t *rsc_then = NULL;
- pe_tag_t *tag_first = NULL;
- pe_tag_t *tag_then = NULL;
+ pcmk_resource_t *rsc_first = NULL;
+ pcmk_resource_t *rsc_then = NULL;
+ pcmk_tag_t *tag_first = NULL;
+ pcmk_tag_t *tag_then = NULL;
xmlNode *rsc_set_first = NULL;
xmlNode *rsc_set_then = NULL;
bool any_sets = false;
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_order");
return pcmk_rc_ok;
@@ -914,14 +921,15 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, id_first, &rsc_first,
+ if (!pcmk__valid_resource_or_tag(scheduler, id_first, &rsc_first,
&tag_first)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_first);
return pcmk_rc_unpack_error;
}
- if (!pcmk__valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, id_then, &rsc_then,
+ &tag_then)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_then);
return pcmk_rc_unpack_error;
@@ -937,9 +945,9 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "first" into resource_set under constraint
+ // Convert template/tag reference in "first" into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -954,9 +962,9 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
any_sets = true;
}
- // Convert template/tag reference in "then" into resource_set under constraint
+ // Convert template/tag reference in "then" into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_then, XML_ORDER_ATTR_THEN,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -985,11 +993,11 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
* \internal
* \brief Unpack ordering constraint XML
*
- * \param[in,out] xml_obj Ordering constraint XML to unpack
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] xml_obj Ordering constraint XML to unpack
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
xmlNode *last = NULL;
@@ -1005,7 +1013,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
NULL);
// Expand any resource tags in the constraint XML
- if (unpack_order_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
+ if (unpack_order_tags(xml_obj, &expanded_xml, scheduler) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
@@ -1017,9 +1025,9 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET);
set != NULL; set = crm_next_same_xml(set)) {
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if ((set == NULL) // Configuration error, message already logged
- || (unpack_order_set(set, kind, invert, data_set) != pcmk_rc_ok)) {
+ || (unpack_order_set(set, kind, invert, scheduler) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1029,7 +1037,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
if (last != NULL) {
- if (order_rsc_sets(id, last, set, kind, data_set,
+ if (order_rsc_sets(id, last, set, kind, scheduler,
symmetry) != pcmk_rc_ok) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1038,7 +1046,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if ((symmetry == ordering_symmetric)
- && (order_rsc_sets(id, set, last, kind, data_set,
+ && (order_rsc_sets(id, set, last, kind, scheduler,
ordering_symmetric_inverse) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1057,17 +1065,17 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
// If the constraint has no resource sets, unpack it as a simple ordering
if (last == NULL) {
- return unpack_simple_rsc_order(xml_obj, data_set);
+ return unpack_simple_rsc_order(xml_obj, scheduler);
}
}
static bool
-ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
+ordering_is_invalid(pcmk_action_t *action, pcmk__related_action_t *input)
{
/* Prevent user-defined ordering constraints between resources
* running in a guest node and the resource that defines that node.
*/
- if (!pcmk_is_set(input->type, pe_order_preserve)
+ if (!pcmk_is_set(input->type, pcmk__ar_guest_allowed)
&& (input->action->rsc != NULL)
&& pcmk__rsc_corresponds_to_guest(action->rsc, input->action->node)) {
@@ -1083,8 +1091,9 @@ ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
* migrated from node2 to node1. If there would be a graph loop,
* break the order "load_stopped_node2" -> "rscA_migrate_to node1".
*/
- if ((input->type == pe_order_load) && action->rsc
- && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)
+ if (((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target)
+ && (action->rsc != NULL)
+ && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)
&& pcmk__graph_has_loop(action, action, input)) {
return true;
}
@@ -1093,18 +1102,18 @@ ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
}
void
-pcmk__disable_invalid_orderings(pe_working_set_t *data_set)
+pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
- pe_action_wrapper_t *input = NULL;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
+ pcmk__related_action_t *input = NULL;
for (GList *input_iter = action->actions_before;
input_iter != NULL; input_iter = input_iter->next) {
- input = (pe_action_wrapper_t *) input_iter->data;
+ input = input_iter->data;
if (ordering_is_invalid(action, input)) {
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
}
}
}
@@ -1118,23 +1127,22 @@ pcmk__disable_invalid_orderings(pe_working_set_t *data_set)
* \param[in] shutdown_op Shutdown action for node
*/
void
-pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
+pcmk__order_stops_before_shutdown(pcmk_node_t *node, pcmk_action_t *shutdown_op)
{
for (GList *iter = node->details->data_set->actions;
iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
// Only stops on the node shutting down are relevant
- if ((action->rsc == NULL) || (action->node == NULL)
- || (action->node->details != node->details)
- || !pcmk__str_eq(action->task, RSC_STOP, pcmk__str_casei)) {
+ if (!pe__same_node(action->node, node)
+ || !pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
continue;
}
// Resources and nodes in maintenance mode won't be touched
- if (pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before shutdown of %s because "
"resource in maintenance mode",
@@ -1154,7 +1162,7 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
* we may still end up blocking)
*/
if (!pcmk_any_flags_set(action->rsc->flags,
- pe_rsc_managed|pe_rsc_block)) {
+ pcmk_rsc_managed|pcmk_rsc_blocked)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before shutdown of %s because "
"resource is unmanaged or blocked",
@@ -1164,10 +1172,10 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
pe_rsc_trace(action->rsc, "Ordering %s before shutdown of %s",
action->uuid, pe__node_name(node));
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
pcmk__new_ordering(action->rsc, NULL, action, NULL,
- strdup(CRM_OP_SHUTDOWN), shutdown_op,
- pe_order_optional|pe_order_runnable_left,
+ strdup(PCMK_ACTION_DO_SHUTDOWN), shutdown_op,
+ pcmk__ar_ordered|pcmk__ar_unrunnable_first_blocks,
node->details->data_set);
}
}
@@ -1183,7 +1191,7 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
* \note It is the caller's responsibility to free the result with g_list_free()
*/
static GList *
-find_actions_by_task(const pe_resource_t *rsc, const char *original_key)
+find_actions_by_task(const pcmk_resource_t *rsc, const char *original_key)
{
// Search under given task key directly
GList *list = find_actions(rsc->actions, original_key, NULL);
@@ -1215,11 +1223,11 @@ find_actions_by_task(const pe_resource_t *rsc, const char *original_key)
* \param[in,out] order Ordering constraint being applied
*/
static void
-order_resource_actions_after(pe_action_t *first_action,
- const pe_resource_t *rsc, pe__ordering_t *order)
+order_resource_actions_after(pcmk_action_t *first_action,
+ const pcmk_resource_t *rsc, pe__ordering_t *order)
{
GList *then_actions = NULL;
- uint32_t flags = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
CRM_CHECK((rsc != NULL) && (order != NULL), return);
@@ -1241,15 +1249,17 @@ order_resource_actions_after(pe_action_t *first_action,
}
if ((first_action != NULL) && (first_action->rsc == rsc)
- && pcmk_is_set(first_action->flags, pe_action_dangle)) {
+ && pcmk_is_set(first_action->flags, pcmk_action_migration_abort)) {
pe_rsc_trace(rsc,
"Detected dangling migration ordering (%s then %s %s)",
first_action->uuid, order->rh_action_task, rsc->id);
- pe__clear_order_flags(flags, pe_order_implies_then);
+ pe__clear_order_flags(flags, pcmk__ar_first_implies_then);
}
- if ((first_action == NULL) && !pcmk_is_set(flags, pe_order_implies_then)) {
+ if ((first_action == NULL)
+ && !pcmk_is_set(flags, pcmk__ar_first_implies_then)) {
+
pe_rsc_debug(rsc,
"Ignoring ordering %d for %s: No first action found",
order->id, rsc->id);
@@ -1258,12 +1268,12 @@ order_resource_actions_after(pe_action_t *first_action,
}
for (GList *iter = then_actions; iter != NULL; iter = iter->next) {
- pe_action_t *then_action_iter = (pe_action_t *) iter->data;
+ pcmk_action_t *then_action_iter = (pcmk_action_t *) iter->data;
if (first_action != NULL) {
order_actions(first_action, then_action_iter, flags);
} else {
- pe__clear_action_flags(then_action_iter, pe_action_runnable);
+ pe__clear_action_flags(then_action_iter, pcmk_action_runnable);
crm_warn("%s of %s is unrunnable because there is no %s of %s "
"to order it after", then_action_iter->task, rsc->id,
order->lh_action_task, order->lh_rsc->id);
@@ -1274,12 +1284,11 @@ order_resource_actions_after(pe_action_t *first_action,
}
static void
-rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
- pe_working_set_t *data_set)
+rsc_order_first(pcmk_resource_t *first_rsc, pe__ordering_t *order)
{
GList *first_actions = NULL;
- pe_action_t *first_action = order->lh_action;
- pe_resource_t *then_rsc = order->rh_rsc;
+ pcmk_action_t *first_action = order->lh_action;
+ pcmk_resource_t *then_rsc = order->rh_rsc;
CRM_ASSERT(first_rsc != NULL);
pe_rsc_trace(first_rsc, "Applying ordering constraint %d (first: %s)",
@@ -1305,15 +1314,17 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = pcmk__op_key(first_rsc->id, op_type, interval_ms);
- if ((first_rsc->fns->state(first_rsc, TRUE) == RSC_ROLE_STOPPED)
- && pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
+ if ((first_rsc->fns->state(first_rsc, TRUE) == pcmk_role_stopped)
+ && pcmk__str_eq(op_type, PCMK_ACTION_STOP, pcmk__str_none)) {
free(key);
pe_rsc_trace(first_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
order->id, order->lh_action_task, first_rsc->id);
- } else if ((first_rsc->fns->state(first_rsc, TRUE) == RSC_ROLE_UNPROMOTED)
- && pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
+ } else if ((first_rsc->fns->state(first_rsc,
+ TRUE) == pcmk_role_unpromoted)
+ && pcmk__str_eq(op_type, PCMK_ACTION_DEMOTE,
+ pcmk__str_none)) {
free(key);
pe_rsc_trace(first_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
@@ -1324,7 +1335,7 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
"Creating first (%s for %s) for constraint %d ",
order->lh_action_task, first_rsc->id, order->id);
first_action = custom_action(first_rsc, key, op_type, NULL, TRUE,
- TRUE, data_set);
+ first_rsc->cluster);
first_actions = g_list_prepend(NULL, first_action);
}
@@ -1339,8 +1350,8 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
}
then_rsc = order->rh_action->rsc;
}
- for (GList *gIter = first_actions; gIter != NULL; gIter = gIter->next) {
- first_action = (pe_action_t *) gIter->data;
+ for (GList *iter = first_actions; iter != NULL; iter = iter->next) {
+ first_action = iter->data;
if (then_rsc == NULL) {
order_actions(first_action, order->rh_action, order->flags);
@@ -1353,8 +1364,29 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
g_list_free(first_actions);
}
+// GFunc to call pcmk__block_colocation_dependents()
+static void
+block_colocation_dependents(gpointer data, gpointer user_data)
+{
+ pcmk__block_colocation_dependents(data);
+}
+
+// GFunc to call pcmk__update_action_for_orderings()
+static void
+update_action_for_orderings(gpointer data, gpointer user_data)
+{
+ pcmk__update_action_for_orderings((pcmk_action_t *) data,
+ (pcmk_scheduler_t *) user_data);
+}
+
+/*!
+ * \internal
+ * \brief Apply all ordering constraints
+ *
+ * \param[in,out] sched Scheduler data
+ */
void
-pcmk__apply_orderings(pe_working_set_t *data_set)
+pcmk__apply_orderings(pcmk_scheduler_t *sched)
{
crm_trace("Applying ordering constraints");
@@ -1370,16 +1402,16 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
* @TODO This is brittle and should be carefully redesigned so that the
* order of creation doesn't matter, and the reverse becomes unneeded.
*/
- data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
+ sched->ordering_constraints = g_list_reverse(sched->ordering_constraints);
- for (GList *gIter = data_set->ordering_constraints;
- gIter != NULL; gIter = gIter->next) {
+ for (GList *iter = sched->ordering_constraints;
+ iter != NULL; iter = iter->next) {
- pe__ordering_t *order = gIter->data;
- pe_resource_t *rsc = order->lh_rsc;
+ pe__ordering_t *order = iter->data;
+ pcmk_resource_t *rsc = order->lh_rsc;
if (rsc != NULL) {
- rsc_order_first(rsc, order, data_set);
+ rsc_order_first(rsc, order);
continue;
}
@@ -1394,17 +1426,15 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
}
}
- g_list_foreach(data_set->actions, (GFunc) pcmk__block_colocation_dependents,
- data_set);
+ g_list_foreach(sched->actions, block_colocation_dependents, NULL);
crm_trace("Ordering probes");
- pcmk__order_probes(data_set);
+ pcmk__order_probes(sched);
- crm_trace("Updating %d actions", g_list_length(data_set->actions));
- g_list_foreach(data_set->actions,
- (GFunc) pcmk__update_action_for_orderings, data_set);
+ crm_trace("Updating %d actions", g_list_length(sched->actions));
+ g_list_foreach(sched->actions, update_action_for_orderings, sched);
- pcmk__disable_invalid_orderings(data_set);
+ pcmk__disable_invalid_orderings(sched);
}
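
The two static wrappers above exist because g_list_foreach() expects a GFunc taking (gpointer data, gpointer user_data); casting a function with a different signature to GFunc, as the removed code did, is undefined behavior. A self-contained GLib illustration of the same wrapper pattern, using hypothetical names:

#include <glib.h>
#include <stdio.h>

// Hypothetical worker with a non-GFunc signature
static void
process_item(int *item)
{
    printf("processing %d\n", *item);
}

// GFunc-conformant wrapper, mirroring block_colocation_dependents() above
static void
process_item_wrapper(gpointer data, gpointer user_data)
{
    process_item((int *) data);
}

int
main(void)
{
    int values[] = { 1, 2, 3 };
    GList *list = NULL;

    for (int i = 0; i < 3; i++) {
        list = g_list_append(list, &values[i]);
    }
    g_list_foreach(list, process_item_wrapper, NULL);
    g_list_free(list);
    return 0;
}
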
/*!
@@ -1415,18 +1445,18 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
* \param[in,out] list List of "before" actions
*/
void
-pcmk__order_after_each(pe_action_t *after, GList *list)
+pcmk__order_after_each(pcmk_action_t *after, GList *list)
{
const char *after_desc = (after->task == NULL)? after->uuid : after->task;
for (GList *iter = list; iter != NULL; iter = iter->next) {
- pe_action_t *before = (pe_action_t *) iter->data;
+ pcmk_action_t *before = (pcmk_action_t *) iter->data;
const char *before_desc = before->task? before->task : before->uuid;
crm_debug("Ordering %s on %s before %s on %s",
before_desc, pe__node_name(before->node),
after_desc, pe__node_name(after->node));
- order_actions(before, after, pe_order_optional);
+ order_actions(before, after, pcmk__ar_ordered);
}
}
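
A small usage sketch for the helper above: collect a resource's stop actions and order each one before a single later action. Here rsc and shutdown_op are assumed to already exist; find_actions() and pcmk__op_key() appear elsewhere in this patch.

/* Sketch only: order every stop of rsc before one "after" action */
char *stop_key = pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0);
GList *stop_actions = find_actions(rsc->actions, stop_key, NULL);

pcmk__order_after_each(shutdown_op, stop_actions);

g_list_free(stop_actions);
free(stop_key);
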
@@ -1437,27 +1467,34 @@ pcmk__order_after_each(pe_action_t *after, GList *list)
* \param[in,out] rsc Clone or bundle to order
*/
void
-pcmk__promotable_restart_ordering(pe_resource_t *rsc)
+pcmk__promotable_restart_ordering(pcmk_resource_t *rsc)
{
// Order start and promote after all instances are stopped
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order stop, start, and promote after all instances are demoted
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order promote after all instances are started
- pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order demote after all instances are demoted
- pcmk__order_resource_actions(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE,
+ rsc, PCMK_ACTION_DEMOTED,
+ pcmk__ar_ordered);
}
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index aefbf9a..96acf1c 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -10,20 +10,26 @@
#include <crm_internal.h>
#include <stdbool.h>
+#include <stdint.h> // uint8_t, uint32_t
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
-static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
+static void stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void start_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
-static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
+static void assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
+#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
+
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
@@ -34,35 +40,35 @@ static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
- /* Unknown */ { RSC_ROLE_UNKNOWN, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STOPPED, /* Started */
- RSC_ROLE_STOPPED, /* Unpromoted */
- RSC_ROLE_STOPPED, /* Promoted */
+ /* Unknown */ { pcmk_role_unknown, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_stopped, /* Started */
+ pcmk_role_stopped, /* Unpromoted */
+ pcmk_role_stopped, /* Promoted */
},
- /* Stopped */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STARTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_UNPROMOTED, /* Promoted */
+ /* Stopped */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_started, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_unpromoted, /* Promoted */
},
- /* Started */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STARTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Started */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_started, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
- /* Unpromoted */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STOPPED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Unpromoted */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_stopped, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
- /* Promoted */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_UNPROMOTED, /* Stopped */
- RSC_ROLE_UNPROMOTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Promoted */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_unpromoted, /* Stopped */
+ pcmk_role_unpromoted, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
};
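
To make the table concrete, here is a small self-contained program (not Pacemaker code; it redefines the roles locally in the same order as pcmk_role_*) that performs the same lookup to walk a resource from Stopped to Promoted.

    #include <stdio.h>

    enum role { role_unknown, role_stopped, role_started, role_unpromoted,
                role_promoted, ROLE_MAX };

    static const char *role_names[ROLE_MAX] = {
        "Unknown", "Stopped", "Started", "Unpromoted", "Promoted",
    };

    /* Same transitions as rsc_state_matrix above: next[current][target] */
    static const enum role next[ROLE_MAX][ROLE_MAX] = {
        /* Unknown    */ { role_unknown, role_stopped, role_stopped,
                           role_stopped, role_stopped },
        /* Stopped    */ { role_stopped, role_stopped, role_started,
                           role_unpromoted, role_unpromoted },
        /* Started    */ { role_stopped, role_stopped, role_started,
                           role_unpromoted, role_promoted },
        /* Unpromoted */ { role_stopped, role_stopped, role_stopped,
                           role_unpromoted, role_promoted },
        /* Promoted   */ { role_stopped, role_unpromoted, role_unpromoted,
                           role_unpromoted, role_promoted },
    };

    int
    main(void)
    {
        enum role current = role_stopped;
        enum role target = role_promoted;

        // Prints: Stopped -> Unpromoted -> Promoted
        printf("%s", role_names[current]);
        while (current != target) {
            current = next[current][target];
            printf(" -> %s", role_names[current]);
        }
        printf("\n");
        return 0;
    }
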
@@ -74,7 +80,7 @@ static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
-typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
+typedef void (*rsc_transition_fn)(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
@@ -118,14 +124,14 @@ static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/*!
* \internal
- * \brief Get a list of a resource's allowed nodes sorted by node weight
+ * \brief Get a list of a resource's allowed nodes sorted by node score
*
* \param[in] rsc Resource to check
*
- * \return List of allowed nodes sorted by node weight
+ * \return List of allowed nodes sorted by node score
*/
static GList *
-sorted_allowed_nodes(const pe_resource_t *rsc)
+sorted_allowed_nodes(const pcmk_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
@@ -141,33 +147,43 @@ sorted_allowed_nodes(const pe_resource_t *rsc)
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
- * \param[in,out] rsc Resource to choose a node for
- * \param[in] prefer If not NULL, prefer this node when all else equal
+ * \param[in,out] rsc Resource to choose a node for
+ * \param[in] prefer If not \c NULL, prefer this node when all else
+ * equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions
*
* \return true if \p rsc could be assigned to a node, otherwise false
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
static bool
-assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
+assign_best_node(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *nodes = NULL;
- pe_node_t *chosen = NULL;
- pe_node_t *best = NULL;
- bool result = false;
- const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
+ pcmk_node_t *chosen = NULL;
+ pcmk_node_t *best = NULL;
+ const pcmk_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
- // Sort allowed nodes by weight
+ // Sort allowed nodes by score
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
- best = (pe_node_t *) nodes->data; // First node has best score
+ best = (pcmk_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
@@ -178,11 +194,11 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
pe__node_name(prefer), rsc->id);
- /* Favor the preferred node as long as its weight is at least as good as
+ /* Favor the preferred node as long as its score is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
- * node is better, when the best node's weight is less than INFINITY.
+ * node is better, when the best node's score is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
@@ -196,7 +212,8 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
} else {
pe_rsc_trace(rsc,
- "Chose preferred node %s for %s (ignoring %d candidates)",
+ "Chose preferred node %s for %s "
+ "(ignoring %d candidates)",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
}
@@ -220,23 +237,24 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
- pe_node_t *running = pe__current_node(rsc);
+ pcmk_node_t *running = pe__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
- pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
+ pe_rsc_trace(rsc,
+ "Current node for %s (%s) can't run resources",
rsc->id, pe__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
- pe_node_t *allowed = (pe_node_t *) iter->data;
+ pcmk_node_t *allowed = (pcmk_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
- // The nodes are sorted by weight, so no more are equal
+ // The nodes are sorted by score, so no more are equal
break;
}
if (pe__same_node(allowed, running)) {
@@ -247,7 +265,12 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
}
if (nodes_with_best_score > 1) {
- do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
+ uint8_t log_level = LOG_INFO;
+
+ if (chosen->weight >= INFINITY) {
+ log_level = LOG_WARNING;
+ }
+ do_crm_log(log_level,
"Chose %s for %s from %d nodes with score %s",
pe__node_name(chosen), rsc->id,
nodes_with_best_score,
@@ -260,40 +283,37 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
- result = pcmk__finalize_assignment(rsc, chosen, false);
+ pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
g_list_free(nodes);
- return result;
+ return rsc->allocated_to != NULL;
}
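
The selection logic above boils down to two visible tie-breaking rules: a preferred node wins only if its score is at least the best allowed score, and (for cases such as anonymous clone instances, per the comment above) a node already running the resource is favored among nodes tied at the chosen score. Below is a simplified standalone model of just those rules; the real function applies them under additional conditions and uses pcmk_node_t, not this ad hoc struct.

    #include <stddef.h>
    #include <string.h>

    struct candidate {
        const char *name;
        int score;
    };

    /* candidates[] is assumed sorted best score first, like the list returned
     * by sorted_allowed_nodes(); prefer_name and running_name may be NULL.
     */
    static const struct candidate *
    choose_candidate(const struct candidate *candidates, size_t n,
                     const char *prefer_name, const char *running_name)
    {
        const struct candidate *chosen = NULL;

        if (n == 0) {
            return NULL;
        }
        chosen = &candidates[0];    // first entry has the best score

        // Rule 1: favor the preferred node if it ties or beats the best score
        if (prefer_name != NULL) {
            for (size_t i = 0; i < n; i++) {
                if ((strcmp(candidates[i].name, prefer_name) == 0)
                    && (candidates[i].score >= chosen->score)) {
                    chosen = &candidates[i];
                    break;
                }
            }
        }

        // Rule 2 (simplified): among candidates tied with the chosen score,
        // favor the node that is already running the resource
        if (running_name != NULL) {
            for (size_t i = 0; i < n; i++) {
                if (candidates[i].score != chosen->score) {
                    break;      // sorted by score, so no more ties
                }
                if (strcmp(candidates[i].name, running_name) == 0) {
                    chosen = &candidates[i];
                    break;
                }
            }
        }
        return chosen;
    }
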
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
- * \param[in,out] data Colocation to apply
- * \param[in,out] user_data Resource being assigned
+ * \param[in,out] colocation Colocation to apply
+ * \param[in,out] rsc Resource being assigned
*/
static void
-apply_this_with(gpointer data, gpointer user_data)
+apply_this_with(pcmk__colocation_t *colocation, pcmk_resource_t *rsc)
{
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
-
GHashTable *archive = NULL;
- pe_resource_t *other = colocation->primary;
+ pcmk_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
- if ((colocation->dependent_role >= RSC_ROLE_PROMOTED)
+ if ((colocation->dependent_role >= pcmk_role_promoted)
|| ((colocation->score < 0) && (colocation->score > -INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
- if (pcmk_is_set(other->flags, pe_rsc_provisional)) {
+ if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) {
pe_rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score, role2text(colocation->dependent_role));
- other->cmds->assign(other, NULL);
+ other->cmds->assign(other, NULL, true);
}
// Apply the colocation score to this resource's allowed node scores
@@ -320,15 +340,15 @@ apply_this_with(gpointer data, gpointer user_data)
* \param[in] connection Connection resource that has been assigned
*/
static void
-remote_connection_assigned(const pe_resource_t *connection)
+remote_connection_assigned(const pcmk_resource_t *connection)
{
- pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
- connection->id);
+ pcmk_node_t *remote_node = pe_find_node(connection->cluster->nodes,
+ connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
- && (connection->next_role != RSC_ROLE_STOPPED)) {
+ && (connection->next_role != pcmk_role_stopped)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
@@ -352,42 +372,59 @@ remote_connection_assigned(const pe_resource_t *connection)
* \internal
* \brief Assign a primitive resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__primitive_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *this_with_colocations = NULL;
GList *with_this_colocations = NULL;
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
// Never assign a child without parent being assigned first
if ((rsc->parent != NULL)
- && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
+ && !pcmk_is_set(rsc->parent->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "%s: Assigning parent %s first",
rsc->id, rsc->parent->id);
- rsc->parent->cmds->assign(rsc->parent, prefer);
+ rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
- return rsc->allocated_to; // Assignment has already been done
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
+ // Assignment has already been done
+ const char *node_name = "no node";
+
+ if (rsc->allocated_to != NULL) {
+ node_name = pe__node_name(rsc->allocated_to);
+ }
+ pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
+ return rsc->allocated_to;
}
// Ensure we detect assignment loops
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
- pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes,
- rsc->cluster);
+ pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes,
+ rsc->cluster);
this_with_colocations = pcmk__this_with_colocations(rsc);
with_this_colocations = pcmk__with_this_colocations(rsc);
@@ -395,21 +432,23 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
// Apply mandatory colocations first, to satisfy as many as possible
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
+
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
- apply_this_with(iter->data, rsc);
+ apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
+
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
- pcmk__add_dependent_scores(iter->data, rsc);
+ pcmk__add_dependent_scores(colocation, rsc);
}
}
- pe__show_node_weights(true, rsc, "Mandatory-colocations",
- rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(true, rsc, "Mandatory-colocations",
+ rsc->allowed_nodes, rsc->cluster);
// Then apply optional colocations
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
@@ -417,7 +456,7 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
- apply_this_with(iter->data, rsc);
+ apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
@@ -425,14 +464,14 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
- pcmk__add_dependent_scores(iter->data, rsc);
+ pcmk__add_dependent_scores(colocation, rsc);
}
}
g_list_free(this_with_colocations);
g_list_free(with_this_colocations);
- if (rsc->next_role == RSC_ROLE_STOPPED) {
+ if (rsc->next_role == pcmk_role_stopped) {
pe_rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
@@ -440,64 +479,62 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
rsc->cluster);
} else if ((rsc->next_role > rsc->role)
- && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
- && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
+ && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_quorate)
+ && (rsc->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
"no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
// Unmanage resource if fencing is enabled but no device is configured
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
- && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
+ && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_have_fencing)) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
- pe_node_t *assign_to = NULL;
+ pcmk_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
reason = "promoted";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
- pcmk__finalize_assignment(rsc, assign_to, true);
-
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
- pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
- pcmk__finalize_assignment(rsc, NULL, true);
+ pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
- && assign_best_node(rsc, prefer)) {
- // Assignment successful
+ } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_stop_all)) {
+ // Must stop at some point, but be consistent with stop_if_fail
+ if (stop_if_fail) {
+ pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources",
+ rsc->id);
+ }
+ pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
- } else if (rsc->allocated_to == NULL) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ } else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
+ // Assignment failed
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
- } else if (rsc->running_on != NULL) {
+ } else if ((rsc->running_on != NULL) && stop_if_fail) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
-
- } else {
- pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id,
- pe__node_name(rsc->allocated_to));
}
- pe__clear_resource_flags(rsc, pe_rsc_allocating);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
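
A hedged sketch (not from the patch) of what the new stop_if_fail parameter enables for callers: a tentative assignment that, per the note above, can be rolled back with pcmk__unassign_resource() without side effects on roles or actions. Only the assign method's shape and pcmk__unassign_resource() are taken from this diff; the wrapper and the keep_this_assignment() policy check are invented for illustration, and the sketch assumes the same headers this file already includes.

    static bool
    try_tentative_assignment(pcmk_resource_t *rsc, const pcmk_node_t *candidate)
    {
        // stop_if_fail == false: a failure leaves roles and actions untouched
        pcmk_node_t *assigned = rsc->cmds->assign(rsc, candidate, false);

        if (assigned == NULL) {
            return false;                            // nothing to undo
        }
        if (!keep_this_assignment(rsc, assigned)) {  // hypothetical policy check
            pcmk__unassign_resource(rsc);            // roll the assignment back
            return false;
        }
        return true;                                 // keep the assignment as final
    }
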
@@ -518,18 +555,18 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
* \return Role that resource would have after scheduled actions are taken
*/
static void
-schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
+schedule_restart_actions(pcmk_resource_t *rsc, pcmk_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
- pe__set_resource_flags(rsc, pe_rsc_restarting);
+ pe__set_resource_flags(rsc, pcmk_rsc_restarting);
// Bring resource down to a stop on its current node
- while (role != RSC_ROLE_STOPPED) {
- next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
+ while (role != pcmk_role_stopped) {
+ next_role = rsc_state_matrix[role][pcmk_role_stopped];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
@@ -543,11 +580,11 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
- && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
- if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
+ if ((next_role == pcmk_role_promoted) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
@@ -561,7 +598,7 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
role = next_role;
}
- pe__clear_resource_flags(rsc, pe_rsc_restarting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_restarting);
}
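
As a worked example of the walk above (illustrative, not exhaustive): recovering a failed Promoted instance in place steps down through rsc_state_matrix to Stopped and then back up toward Promoted, which yields demote, stop, start, and promote in that order. A tiny standalone trace of that sequence:

    #include <stdio.h>

    int
    main(void)
    {
        // Down to Stopped, then back up, per rsc_state_matrix above
        const char *schedule[] = {
            "demote",   // Promoted   -> Unpromoted
            "stop",     // Unpromoted -> Stopped
            "start",    // Stopped    -> Unpromoted
            "promote",  // Unpromoted -> Promoted
        };

        for (size_t i = 0; i < sizeof(schedule) / sizeof(schedule[0]); i++) {
            printf("%zu. %s\n", i + 1, schedule[i]);
        }
        return 0;
    }
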
/*!
@@ -573,16 +610,16 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
-set_default_next_role(pe_resource_t *rsc)
+set_default_next_role(pcmk_resource_t *rsc)
{
- if (rsc->next_role != RSC_ROLE_UNKNOWN) {
+ if (rsc->next_role != pcmk_role_unknown) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment");
+ pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
} else {
- pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment");
+ pe__set_next_role(rsc, pcmk_role_started, "assignment");
}
return "implicit";
}
@@ -594,15 +631,15 @@ set_default_next_role(pe_resource_t *rsc)
* \param[in,out] rsc Resource to create start action for
*/
static void
-create_pending_start(pe_resource_t *rsc)
+create_pending_start(pcmk_resource_t *rsc)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
pe_rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
- pe__set_action_flags(start, pe_action_print_always);
+ pe__set_action_flags(start, pcmk_action_always_in_graph);
}
/*!
@@ -612,7 +649,7 @@ create_pending_start(pe_resource_t *rsc)
* \param[in,out] rsc Resource to schedule actions for
*/
static void
-schedule_role_transition_actions(pe_resource_t *rsc)
+schedule_role_transition_actions(pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
@@ -640,7 +677,7 @@ schedule_role_transition_actions(pe_resource_t *rsc)
* \param[in,out] rsc Primitive resource to create actions for
*/
void
-pcmk__primitive_create_actions(pe_resource_t *rsc)
+pcmk__primitive_create_actions(pcmk_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
@@ -648,12 +685,12 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
bool allow_migrate = false;
bool multiply_active = false;
- pe_node_t *current = NULL;
+ pcmk_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
next_role_source = set_default_next_role(rsc);
pe_rsc_trace(rsc,
@@ -668,8 +705,8 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
- && (current->details != rsc->allocated_to->details)
- && (rsc->next_role >= RSC_ROLE_STARTED)) {
+ && !pe__same_node(current, rsc->allocated_to)
+ && (rsc->next_role >= pcmk_role_started)) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pe__node_name(current),
@@ -715,7 +752,7 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has "requires" set to nothing or quorum, don't consider
@@ -739,51 +776,51 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
"#Resource_is_Too_Active for more information");
switch (rsc->recovery_type) {
- case recovery_stop_start:
+ case pcmk_multiply_active_restart:
need_stop = true;
break;
- case recovery_stop_unexpected:
+ case pcmk_multiply_active_unexpected:
need_stop = true; // stop_resource() will skip expected node
- pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
+ pe__set_resource_flags(rsc, pcmk_rsc_stop_unexpected);
break;
default:
break;
}
} else {
- pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
+ pe__clear_resource_flags(rsc, pcmk_rsc_stop_unexpected);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) {
need_stop = true;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
need_promote = true;
}
}
- } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
- } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL)
+ } else if ((rsc->role > pcmk_role_started) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
- if (!pcmk_is_set(start->flags, pe_action_optional)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
@@ -810,10 +847,10 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
* \param[in] rsc Resource to check
*/
static void
-rsc_avoids_remote_nodes(const pe_resource_t *rsc)
+rsc_avoids_remote_nodes(const pcmk_resource_t *rsc)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
@@ -837,7 +874,7 @@ rsc_avoids_remote_nodes(const pe_resource_t *rsc)
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
-allowed_nodes_as_list(const pe_resource_t *rsc)
+allowed_nodes_as_list(const pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
@@ -859,15 +896,15 @@ allowed_nodes_as_list(const pe_resource_t *rsc)
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
-pcmk__primitive_internal_constraints(pe_resource_t *rsc)
+pcmk__primitive_internal_constraints(pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource %s",
rsc->id);
@@ -875,9 +912,10 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
}
// Whether resource requires unfencing
- check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
- && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)
- && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
+ check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
+ && pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_enable_unfencing)
+ && pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
@@ -885,29 +923,37 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- pe_order_optional|pe_order_implies_then|pe_order_restart,
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
+ pcmk__ar_ordered
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_intermediate_stop,
rsc->cluster);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)
- || (rsc->role > RSC_ROLE_UNPROMOTED)) {
+ pcmk_rsc_promotable)
+ || (rsc->role > pcmk_role_unpromoted)) {
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- pe_order_promoted_implies_first, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
+ pcmk__ar_promoted_then_implies_first, rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
+ NULL,
+ pcmk__ar_unrunnable_first_blocks, rsc->cluster);
}
// Don't clear resource history if probing on same node
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
- NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
- NULL, pe_order_same_node|pe_order_then_cancels_first,
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ NULL, rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ pcmk__ar_if_on_same_node|pcmk__ar_then_cancels_first,
rsc->cluster);
// Certain checks need allowed nodes
@@ -924,7 +970,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
}
if (rsc->container != NULL) {
- pe_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
@@ -932,7 +978,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
- if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
rsc_avoids_remote_nodes(rsc->container);
}
@@ -942,8 +988,9 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
- pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
- RSC_STOP, pe_order_optional);
+ pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
@@ -964,7 +1011,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
- pe_node_t *node = item->data;
+ pcmk_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
@@ -982,29 +1029,36 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
- pcmk__op_key(rsc->container->id, RSC_START, 0),
- NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
+ pcmk__op_key(rsc->container->id,
+ PCMK_ACTION_START, 0),
+ NULL, rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
+ pcmk__new_ordering(rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
rsc->container,
- pcmk__op_key(rsc->container->id, RSC_STOP, 0),
- NULL, pe_order_implies_first, rsc->cluster);
+ pcmk__op_key(rsc->container->id,
+ PCMK_ACTION_STOP, 0),
+ NULL, pcmk__ar_then_implies_first, rsc->cluster);
- if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
- pcmk__new_colocation("resource-with-container", NULL, score, rsc,
- rsc->container, NULL, NULL, true,
- rsc->cluster);
+ pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
+ rsc->container, NULL, NULL,
+ pcmk__coloc_influence);
}
}
- if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (rsc->is_remote_node
+ || pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
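
Summarizing the guest-resource wiring created in this function: the container is ordered to start before the resource it hosts, the hosted resource is ordered to stop before its container, the two are colocated, and remote connections and fence devices are additionally kept off Pacemaker Remote nodes. Below is a minimal sketch of the colocation score choice only, assuming Pacemaker's score macro INFINITY (1000000) rather than the math.h constant; the orderings themselves are exactly the pcmk__new_ordering() calls shown above.

    static int
    guest_colocation_score(bool remote_nesting_allowed)
    {
        /* A resource that may nest on other Pacemaker Remote nodes (e.g.
         * inside a bundle) only strongly prefers its container; otherwise
         * colocation with the container is mandatory.
         */
        return remote_nesting_allowed ? 10000 : INFINITY;
    }
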
@@ -1015,27 +1069,27 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
- * \param[in] for_dependent true if called on behalf of dependent
+ * \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
if (for_dependent) {
// Always process on behalf of primary resource
@@ -1055,7 +1109,7 @@ pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
- pcmk__apply_coloc_to_weights(dependent, primary, colocation);
+ pcmk__apply_coloc_to_scores(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
@@ -1063,40 +1117,62 @@ pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
}
/* Primitive implementation of
- * resource_alloc_functions_t:with_this_colocations()
+ * pcmk_assignment_methods_t:with_this_colocations()
*/
void
-pcmk__with_primitive_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- // Primitives don't have children, so rsc should also be orig_rsc
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native)
- && (rsc == orig_rsc) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (list != NULL));
+
+ if (rsc == orig_rsc) {
+ /* For the resource itself, add all of its own colocations and relevant
+ * colocations from its parent (if any).
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
+ }
+ } else {
+ // For an ancestor, add only explicitly configured constraints
+ for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
- // Add primitive's own colocations plus any relevant ones from parent
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- if (rsc->parent != NULL) {
- rsc->parent->cmds->with_this_colocations(rsc->parent, rsc, list);
+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
+ pcmk__add_with_this(list, colocation, orig_rsc);
+ }
+ }
}
}
/* Primitive implementation of
- * resource_alloc_functions_t:this_with_colocations()
+ * pcmk_assignment_methods_t:this_with_colocations()
*/
void
-pcmk__primitive_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- // Primitives don't have children, so rsc should also be orig_rsc
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native)
- && (rsc == orig_rsc) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (list != NULL));
+
+ if (rsc == orig_rsc) {
+ /* For the resource itself, add all of its own colocations and relevant
+ * colocations from its parent (if any).
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
+ }
+ } else {
+ // For an ancestor, add only explicitly configured constraints
+ for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
- // Add primitive's own colocations plus any relevant ones from parent
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- if (rsc->parent != NULL) {
- rsc->parent->cmds->this_with_colocations(rsc->parent, rsc, list);
+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
+ pcmk__add_this_with(list, colocation, orig_rsc);
+ }
+ }
}
}
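
Both list builders above now follow the same rule: when asked about the resource itself, they add its own colocations plus any relevant ones inherited from its parent, but when asked on behalf of an ancestor they add only constraints the user configured explicitly (the pcmk__coloc_explicit flag). A small illustrative decision helper restating that rule; "explicitly_configured" stands in for the flag test and is not a real field.

    #include <stdbool.h>

    static bool
    include_colocation(bool asking_for_self, bool explicitly_configured)
    {
        if (asking_for_self) {
            return true;                 // the resource gets all of its own colocations
        }
        return explicitly_configured;    // ancestors get only explicit ones
    }
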
@@ -1109,11 +1185,11 @@ pcmk__primitive_with_colocations(const pe_resource_t *rsc,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
+uint32_t
+pcmk__primitive_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
CRM_ASSERT(action != NULL);
- return action->flags;
+ return (uint32_t) action->flags;
}
/*!
@@ -1130,11 +1206,11 @@ pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
* been unpacked and resources have been assigned to nodes.
*/
static bool
-is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
+is_expected_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
- pe_rsc_stop_unexpected|pe_rsc_restarting)
- && (rsc->next_role > RSC_ROLE_STOPPED)
+ pcmk_rsc_stop_unexpected|pcmk_rsc_restarting)
+ && (rsc->next_role > pcmk_role_stopped)
&& pe__same_node(rsc->allocated_to, node);
}
@@ -1147,11 +1223,11 @@ is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in] optional Whether actions should be optional
*/
static void
-stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current = (pe_node_t *) iter->data;
- pe_action_t *stop = NULL;
+ pcmk_node_t *current = (pcmk_node_t *) iter->data;
+ pcmk_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
@@ -1189,8 +1265,8 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
- } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
- |pe_rsc_stop_unexpected)) {
+ } else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting
+ |pcmk_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
@@ -1198,19 +1274,19 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
pe_action_set_reason(stop, "being multiply active", true);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe__clear_action_flags(stop, pe_action_runnable);
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ pe__clear_action_flags(stop, pcmk_action_runnable);
}
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
- pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false,
- rsc->cluster);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
+ pcmk_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
+ NULL, false, rsc->cluster);
- order_actions(stop, unfence, pe_order_implies_first);
+ order_actions(stop, unfence, pcmk__ar_then_implies_first);
if (!pcmk__node_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced",
rsc->id, pe__node_name(current));
@@ -1228,9 +1304,9 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+start_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
CRM_ASSERT(node != NULL);
@@ -1239,10 +1315,10 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
pe__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
- pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);
+ pcmk__order_vs_unfence(rsc, node, start, pcmk__ar_first_implies_then);
- if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
- pe__clear_action_flags(start, pe_action_optional);
+ if (pcmk_is_set(start->flags, pcmk_action_runnable) && !optional) {
+ pe__clear_action_flags(start, pcmk_action_optional);
}
if (is_expected_node(rsc, node)) {
@@ -1253,7 +1329,7 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
- pe__set_action_flags(start, pe_action_pseudo);
+ pe__set_action_flags(start, pcmk_action_pseudo);
}
}
@@ -1266,7 +1342,7 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
@@ -1275,18 +1351,18 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
- action_list = pe__resource_actions(rsc, node, RSC_START, true);
+ action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
- pe_action_t *start = (pe_action_t *) iter->data;
+ pcmk_action_t *start = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
- pe_action_t *promote = promote_action(rsc, node, optional);
+ pcmk_action_t *promote = promote_action(rsc, node, optional);
pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
@@ -1300,16 +1376,17 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
- pe__set_action_flags(promote, pe_action_pseudo);
+ pe__set_action_flags(promote, pcmk_action_pseudo);
}
} else {
pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pe__node_name(node));
- action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true);
+ action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
+ true);
for (iter = action_list; iter != NULL; iter = iter->next) {
- pe_action_t *promote = (pe_action_t *) iter->data;
+ pcmk_action_t *promote = (pcmk_action_t *) iter->data;
- pe__clear_action_flags(promote, pe_action_runnable);
+ pe__clear_action_flags(promote, pcmk_action_runnable);
}
g_list_free(action_list);
}
@@ -1324,7 +1401,7 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
@@ -1332,7 +1409,7 @@ demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current = (pe_node_t *) iter->data;
+ pcmk_node_t *current = (pcmk_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pe_rsc_trace(rsc,
@@ -1349,7 +1426,7 @@ demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
}
static void
-assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
+assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
@@ -1363,18 +1440,19 @@ assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether clean-up should be optional
*/
void
-pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
+pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
- uint32_t flag = optional? pe_order_implies_then : pe_order_optional;
+ uint32_t flag = optional? pcmk__ar_first_implies_then : pcmk__ar_ordered;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pe__node_name(node));
return;
@@ -1390,8 +1468,10 @@ pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
delete_action(rsc, node, optional);
// stop -> clean-up -> start
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag);
- pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_DELETE, flag);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
+ rsc, PCMK_ACTION_START, flag);
}
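
The flag chosen above is deliberately inverted with respect to `optional`, as the comment explains. A standalone restatement of that selection, assuming the headers this file already includes (the flag values are the ones this patch uses; the helper itself is illustrative):

    static uint32_t
    cleanup_ordering_flag(bool cleanup_is_optional)
    {
        /* Required clean-up  -> its stop -> delete -> start orderings stay
         *                       merely ordered (pcmk__ar_ordered)
         * Optional clean-up  -> the orderings make the "then" action required
         *                       if the first action becomes required
         */
        return cleanup_is_optional ? pcmk__ar_first_implies_then
                                   : pcmk__ar_ordered;
    }
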
/*!
@@ -1402,13 +1482,14 @@ pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
-pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
- const pe_resource_t *parent = NULL;
+ const pcmk_resource_t *parent = NULL;
- CRM_ASSERT((rsc != NULL) && (xml != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
@@ -1450,13 +1531,16 @@ pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
}
}
-// Primitive implementation of resource_alloc_functions_t:add_utilization()
+// Primitive implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__primitive_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
- GHashTable *utilization)
+pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization)
{
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (orig_rsc != NULL) && (utilization != NULL));
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
@@ -1474,7 +1558,7 @@ pcmk__primitive_add_utilization(const pe_resource_t *rsc,
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
-shutdown_time(pe_node_t *node)
+shutdown_time(pcmk_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
@@ -1499,8 +1583,8 @@ shutdown_time(pe_node_t *node)
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
- const pe_node_t *node = (const pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ const pcmk_node_t *node = (const pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
@@ -1508,15 +1592,19 @@ ban_if_not_locked(gpointer data, gpointer user_data)
}
}
-// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
+// Primitive implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
+pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc)
{
- const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *class = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
+
+ class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
- || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
+ || pe__resource_is_remote_conn(rsc)) {
return;
}
@@ -1531,14 +1619,14 @@ pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
- pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
+ pe__clear_resource_history(rsc, rsc->lock_node);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
- pe_node_t *node = rsc->running_on->data;
+ pcmk_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
@@ -1562,7 +1650,8 @@ pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pe__node_name(rsc->lock_node),
(long long) lock_expiration);
- pe__update_recheck_time(++lock_expiration, rsc->cluster);
+ pe__update_recheck_time(++lock_expiration, rsc->cluster,
+ "shutdown lock expiration");
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pe__node_name(rsc->lock_node));
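
The expiration handling above only shows the recheck scheduling; how lock_expiration is derived is outside this hunk. A hedged standalone sketch of the implied arithmetic, assuming the lock expires a configured number of seconds (a shutdown-lock-limit style option, an assumption here) after lock_time was recorded:

    #include <time.h>

    /* Sketch only: when a shutdown lock has a nonzero limit, compute the time
     * at which the cluster should re-check so the lock can expire. Mirrors
     * the "++lock_expiration" nudge above; the limit's source is assumed.
     */
    static time_t
    shutdown_lock_recheck(time_t lock_time, time_t lock_limit_seconds)
    {
        time_t expiration = lock_time + lock_limit_seconds;

        return expiration + 1;  // re-check just after the lock expires
    }
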
diff --git a/lib/pacemaker/pcmk_sched_probes.c b/lib/pacemaker/pcmk_sched_probes.c
index 919e523..e31e8d2 100644
--- a/lib/pacemaker/pcmk_sched_probes.c
+++ b/lib/pacemaker/pcmk_sched_probes.c
@@ -25,17 +25,17 @@
* \param[in] node Node that probe will run on
*/
static void
-add_expected_result(pe_action_t *probe, const pe_resource_t *rsc,
- const pe_node_t *node)
+add_expected_result(pcmk_action_t *probe, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node)
{
// Check whether resource is currently active on node
- pe_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
+ pcmk_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
// The expected result is what we think the resource's current state is
if (running == NULL) {
pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
pe__add_action_expected_result(probe, CRM_EX_PROMOTED);
}
}
@@ -50,12 +50,12 @@ add_expected_result(pe_action_t *probe, const pe_resource_t *rsc,
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__probe_resource_list(GList *rscs, pe_node_t *node)
+pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node)
{
bool any_created = false;
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->cmds->create_probe(rsc, node)) {
any_created = true;
@@ -72,15 +72,18 @@ pcmk__probe_resource_list(GList *rscs, pe_node_t *node)
* \param[in] rsc2 Resource that might be started
*/
static void
-probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2)
+probe_then_start(pcmk_resource_t *rsc1, pcmk_resource_t *rsc2)
{
if ((rsc1->allocated_to != NULL)
&& (g_hash_table_lookup(rsc1->known_on,
rsc1->allocated_to->details->id) == NULL)) {
- pcmk__new_ordering(rsc1, pcmk__op_key(rsc1->id, RSC_STATUS, 0), NULL,
- rsc2, pcmk__op_key(rsc2->id, RSC_START, 0), NULL,
- pe_order_optional, rsc1->cluster);
+ pcmk__new_ordering(rsc1,
+ pcmk__op_key(rsc1->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc2, pcmk__op_key(rsc2->id, PCMK_ACTION_START, 0),
+ NULL,
+ pcmk__ar_ordered, rsc1->cluster);
}
}
@@ -93,20 +96,20 @@ probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2)
* \return true if guest resource will likely stop, otherwise false
*/
static bool
-guest_resource_will_stop(const pe_node_t *node)
+guest_resource_will_stop(const pcmk_node_t *node)
{
- const pe_resource_t *guest_rsc = node->details->remote_rsc->container;
+ const pcmk_resource_t *guest_rsc = node->details->remote_rsc->container;
/* Ideally, we'd check whether the guest has a required stop, but that
* information doesn't exist yet, so approximate it ...
*/
return node->details->remote_requires_reset
|| node->details->unclean
- || pcmk_is_set(guest_rsc->flags, pe_rsc_failed)
- || (guest_rsc->next_role == RSC_ROLE_STOPPED)
+ || pcmk_is_set(guest_rsc->flags, pcmk_rsc_failed)
+ || (guest_rsc->next_role == pcmk_role_stopped)
// Guest is moving
- || ((guest_rsc->role > RSC_ROLE_STOPPED)
+ || ((guest_rsc->role > pcmk_role_stopped)
&& (guest_rsc->allocated_to != NULL)
&& (pe_find_node(guest_rsc->running_on,
guest_rsc->allocated_to->details->uname) == NULL));
@@ -121,20 +124,20 @@ guest_resource_will_stop(const pe_node_t *node)
*
* \return Newly created probe action
*/
-static pe_action_t *
-probe_action(pe_resource_t *rsc, pe_node_t *node)
+static pcmk_action_t *
+probe_action(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- pe_action_t *probe = NULL;
- char *key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
+ pcmk_action_t *probe = NULL;
+ char *key = pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0);
crm_debug("Scheduling probe of %s %s on %s",
role2text(rsc->role), rsc->id, pe__node_name(node));
- probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE,
+ probe = custom_action(rsc, key, PCMK_ACTION_MONITOR, node, FALSE,
rsc->cluster);
- pe__clear_action_flags(probe, pe_action_optional);
+ pe__clear_action_flags(probe, pcmk_action_optional);
- pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional);
+ pcmk__order_vs_unfence(rsc, node, probe, pcmk__ar_ordered);
add_expected_result(probe, rsc, node);
return probe;
}
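
The probe's key above comes from pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0). Pacemaker operation keys conventionally take the form <resource>_<action>_<interval-ms>, so a probe key looks like "my-rsc_monitor_0". The helper below is an ad hoc illustration of that convention, not the real pcmk__op_key().

    #include <stdio.h>
    #include <stdlib.h>

    static char *
    example_op_key(const char *rsc_id, const char *action,
                   unsigned int interval_ms)
    {
        int len = snprintf(NULL, 0, "%s_%s_%u", rsc_id, action, interval_ms);
        char *key = malloc(len + 1);

        if (key != NULL) {
            // e.g. example_op_key("my-rsc", "monitor", 0) -> "my-rsc_monitor_0"
            snprintf(key, len + 1, "%s_%s_%u", rsc_id, action, interval_ms);
        }
        return key;     // caller is responsible for free()
    }
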
@@ -151,17 +154,17 @@ probe_action(pe_resource_t *rsc, pe_node_t *node)
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
+pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- uint32_t flags = pe_order_optional;
- pe_action_t *probe = NULL;
- pe_node_t *allowed = NULL;
- pe_resource_t *top = uber_parent(rsc);
+ uint32_t flags = pcmk__ar_ordered;
+ pcmk_action_t *probe = NULL;
+ pcmk_node_t *allowed = NULL;
+ pcmk_resource_t *top = uber_parent(rsc);
const char *reason = NULL;
- CRM_CHECK((rsc != NULL) && (node != NULL), return false);
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_startup_probes)) {
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_probe_resources)) {
reason = "start-up probes are disabled";
goto no_probe;
}
@@ -193,7 +196,7 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
reason = "resource is inside a container";
goto no_probe;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
reason = "resource is orphaned";
goto no_probe;
@@ -213,7 +216,7 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
"on node";
goto no_probe;
- } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
+ } else if (allowed->rsc_discover_mode != pcmk_probe_exclusive) {
// ... but no constraint marks this node for discovery of resource
reason = "resource has exclusive discovery but is not enabled "
"on node";
@@ -224,15 +227,15 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
if (allowed == NULL) {
allowed = node;
}
- if (allowed->rsc_discover_mode == pe_discover_never) {
+ if (allowed->rsc_discover_mode == pcmk_probe_never) {
reason = "node has discovery disabled";
goto no_probe;
}
if (pe__is_guest_node(node)) {
- pe_resource_t *guest = node->details->remote_rsc->container;
+ pcmk_resource_t *guest = node->details->remote_rsc->container;
- if (guest->role == RSC_ROLE_STOPPED) {
+ if (guest->role == pcmk_role_stopped) {
// The guest is stopped, so we know no resource is active there
reason = "node's guest is stopped";
probe_then_start(guest, top);
@@ -242,9 +245,11 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
reason = "node's guest will stop";
// Order resource start after guest stop (in case it's restarting)
- pcmk__new_ordering(guest, pcmk__op_key(guest->id, RSC_STOP, 0),
- NULL, top, pcmk__op_key(top->id, RSC_START, 0),
- NULL, pe_order_optional, rsc->cluster);
+ pcmk__new_ordering(guest,
+ pcmk__op_key(guest->id, PCMK_ACTION_STOP, 0),
+ NULL, top,
+ pcmk__op_key(top->id, PCMK_ACTION_START, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
goto no_probe;
}
}
@@ -264,17 +269,17 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
/* Prevent a start if the resource can't be probed, but don't cause the
* resource or entire clone to stop if already active.
*/
- if (!pcmk_is_set(probe->flags, pe_action_runnable)
+ if (!pcmk_is_set(probe->flags, pcmk_action_runnable)
&& (top->running_on == NULL)) {
- pe__set_order_flags(flags, pe_order_runnable_left);
+ pe__set_order_flags(flags, pcmk__ar_unrunnable_first_blocks);
}
// Start or reload after probing the resource
pcmk__new_ordering(rsc, NULL, probe,
- top, pcmk__op_key(top->id, RSC_START, 0), NULL,
+ top, pcmk__op_key(top->id, PCMK_ACTION_START, 0), NULL,
flags, rsc->cluster);
pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
- pe_order_optional, rsc->cluster);
+ pcmk__ar_ordered, rsc->cluster);
return true;
@@ -295,23 +300,23 @@ no_probe:
* \return true if \p probe should be ordered before \p then, otherwise false
*/
static bool
-probe_needed_before_action(const pe_action_t *probe, const pe_action_t *then)
+probe_needed_before_action(const pcmk_action_t *probe,
+ const pcmk_action_t *then)
{
// Probes on a node are performed after unfencing it, not before
- if (pcmk__str_eq(then->task, CRM_OP_FENCE, pcmk__str_casei)
- && (probe->node != NULL) && (then->node != NULL)
- && (probe->node->details == then->node->details)) {
+ if (pcmk__str_eq(then->task, PCMK_ACTION_STONITH, pcmk__str_none)
+ && pe__same_node(probe->node, then->node)) {
const char *op = g_hash_table_lookup(then->meta, "stonith_action");
- if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
+ if (pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
return false;
}
}
// Probes should be done on a node before shutting it down
- if (pcmk__str_eq(then->task, CRM_OP_SHUTDOWN, pcmk__str_none)
+ if (pcmk__str_eq(then->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)
&& (probe->node != NULL) && (then->node != NULL)
- && (probe->node->details != then->node->details)) {
+ && !pe__same_node(probe->node, then->node)) {
return false;
}
@@ -330,21 +335,23 @@ probe_needed_before_action(const pe_action_t *probe, const pe_action_t *then)
* resource", add implicit "probe this resource then do something" equivalents
* so the relation is upheld until we know whether a stop is needed.
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-add_probe_orderings_for_stops(pe_working_set_t *data_set)
+add_probe_orderings_for_stops(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->ordering_constraints; iter != NULL;
+ for (GList *iter = scheduler->ordering_constraints; iter != NULL;
iter = iter->next) {
pe__ordering_t *order = iter->data;
- uint32_t order_flags = pe_order_optional;
+ uint32_t order_flags = pcmk__ar_ordered;
GList *probes = NULL;
GList *then_actions = NULL;
+ pcmk_action_t *first = NULL;
+ pcmk_action_t *then = NULL;
// Skip disabled orderings
- if (order->flags == pe_order_none) {
+ if (order->flags == pcmk__ar_none) {
continue;
}
@@ -354,17 +361,20 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
}
// Skip invalid orderings (shouldn't be possible)
- if (((order->lh_action == NULL) && (order->lh_action_task == NULL)) ||
- ((order->rh_action == NULL) && (order->rh_action_task == NULL))) {
+ first = order->lh_action;
+ then = order->rh_action;
+ if (((first == NULL) && (order->lh_action_task == NULL))
+ || ((then == NULL) && (order->rh_action_task == NULL))) {
continue;
}
// Skip orderings for first actions other than stop
- if ((order->lh_action != NULL)
- && !pcmk__str_eq(order->lh_action->task, RSC_STOP, pcmk__str_none)) {
+ if ((first != NULL) && !pcmk__str_eq(first->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
continue;
- } else if ((order->lh_action == NULL)
- && !pcmk__ends_with(order->lh_action_task, "_" RSC_STOP "_0")) {
+ } else if ((first == NULL)
+ && !pcmk__ends_with(order->lh_action_task,
+ "_" PCMK_ACTION_STOP "_0")) {
continue;
}
@@ -375,41 +385,40 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
if ((order->rh_rsc != NULL)
&& (order->lh_rsc->container == order->rh_rsc)) {
- if ((order->rh_action != NULL)
- && pcmk__str_eq(order->rh_action->task, RSC_STOP,
- pcmk__str_none)) {
+ if ((then != NULL) && pcmk__str_eq(then->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
continue;
- } else if ((order->rh_action == NULL)
+ } else if ((then == NULL)
&& pcmk__ends_with(order->rh_action_task,
- "_" RSC_STOP "_0")) {
+ "_" PCMK_ACTION_STOP "_0")) {
continue;
}
}
// Preserve certain order options for future filtering
- if (pcmk_is_set(order->flags, pe_order_apply_first_non_migratable)) {
- pe__set_order_flags(order_flags,
- pe_order_apply_first_non_migratable);
+ if (pcmk_is_set(order->flags, pcmk__ar_if_first_unmigratable)) {
+ pe__set_order_flags(order_flags, pcmk__ar_if_first_unmigratable);
}
- if (pcmk_is_set(order->flags, pe_order_same_node)) {
- pe__set_order_flags(order_flags, pe_order_same_node);
+ if (pcmk_is_set(order->flags, pcmk__ar_if_on_same_node)) {
+ pe__set_order_flags(order_flags, pcmk__ar_if_on_same_node);
}
// Preserve certain order types for future filtering
- if ((order->flags == pe_order_anti_colocation)
- || (order->flags == pe_order_load)) {
+ if ((order->flags == pcmk__ar_if_required_on_same_node)
+ || (order->flags == pcmk__ar_if_on_same_node_or_target)) {
order_flags = order->flags;
}
// List all scheduled probes for the first resource
- probes = pe__resource_actions(order->lh_rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(order->lh_rsc, NULL, PCMK_ACTION_MONITOR,
+ FALSE);
if (probes == NULL) { // There aren't any
continue;
}
// List all relevant "then" actions
- if (order->rh_action != NULL) {
- then_actions = g_list_prepend(NULL, order->rh_action);
+ if (then != NULL) {
+ then_actions = g_list_prepend(NULL, then);
} else if (order->rh_rsc != NULL) {
then_actions = find_actions(order->rh_rsc->actions,
@@ -422,19 +431,19 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
crm_trace("Implying 'probe then' orderings for '%s then %s' "
"(id=%d, type=%.6x)",
- order->lh_action? order->lh_action->uuid : order->lh_action_task,
- order->rh_action? order->rh_action->uuid : order->rh_action_task,
+ ((first == NULL)? order->lh_action_task : first->uuid),
+ ((then == NULL)? order->rh_action_task : then->uuid),
order->id, order->flags);
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
- pe_action_t *probe = (pe_action_t *) probe_iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
for (GList *then_iter = then_actions; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_t *then = (pe_action_t *) then_iter->data;
+ pcmk_action_t *then = (pcmk_action_t *) then_iter->data;
if (probe_needed_before_action(probe, then)) {
order_actions(probe, then, order_flags);
@@ -458,53 +467,53 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
* \param[in,out] after 'then' action wrapper in the ordering
*/
static void
-add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after)
+add_start_orderings_for_probe(pcmk_action_t *probe,
+ pcmk__related_action_t *after)
{
- uint32_t flags = pe_order_optional|pe_order_runnable_left;
+ uint32_t flags = pcmk__ar_ordered|pcmk__ar_unrunnable_first_blocks;
/* Although the ordering between the probe of the clone instance and the
* start of its parent has been added in pcmk__probe_rsc_on_node(), we
- * avoided enforcing `pe_order_runnable_left` order type for that as long as
- * any of the clone instances are running to prevent them from being
- * unexpectedly stopped.
+ * avoided enforcing `pcmk__ar_unrunnable_first_blocks` order type for that
+ * as long as any of the clone instances are running to prevent them from
+ * being unexpectedly stopped.
*
* On the other hand, we still need to prevent any inactive instances from
* starting unless the probe is runnable so that we don't risk starting too
* many instances before we know the state on all nodes.
*/
- if (after->action->rsc->variant <= pe_group
- || pcmk_is_set(probe->flags, pe_action_runnable)
+ if ((after->action->rsc->variant <= pcmk_rsc_variant_group)
+ || pcmk_is_set(probe->flags, pcmk_action_runnable)
// The order type is already enforced for its parent.
- || pcmk_is_set(after->type, pe_order_runnable_left)
+ || pcmk_is_set(after->type, pcmk__ar_unrunnable_first_blocks)
|| (pe__const_top_resource(probe->rsc, false) != after->action->rsc)
- || !pcmk__str_eq(after->action->task, RSC_START, pcmk__str_none)) {
+ || !pcmk__str_eq(after->action->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
return;
}
- crm_trace("Adding probe start orderings for '%s@%s (%s) "
+ crm_trace("Adding probe start orderings for 'unrunnable %s@%s "
"then instances of %s@%s'",
probe->uuid, pe__node_name(probe->node),
- pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable",
after->action->uuid, pe__node_name(after->action->node));
for (GList *then_iter = after->action->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
+ pcmk__related_action_t *then = then_iter->data;
if (then->action->rsc->running_on
|| (pe__const_top_resource(then->action->rsc, false)
!= after->action->rsc)
- || !pcmk__str_eq(then->action->task, RSC_START, pcmk__str_none)) {
+ || !pcmk__str_eq(then->action->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
continue;
}
- crm_trace("Adding probe start ordering for '%s@%s (%s) "
+ crm_trace("Adding probe start ordering for 'unrunnable %s@%s "
"then %s@%s' (type=%#.6x)",
probe->uuid, pe__node_name(probe->node),
- pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable",
- then->action->uuid, pe__node_name(then->action->node),
- flags);
+ then->action->uuid, pe__node_name(then->action->node), flags);
/* Prevent the instance from starting if the instance can't, but don't
             * cause any other instances to stop if already active.
@@ -526,28 +535,26 @@ add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after)
*
* \param[in,out] probe Probe as 'first' action in an ordering
* \param[in,out] after 'then' action in the ordering
- * \param[in,out] data_set Cluster working set
*/
static void
-add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
- pe_working_set_t *data_set)
+add_restart_orderings_for_probe(pcmk_action_t *probe, pcmk_action_t *after)
{
GList *iter = NULL;
bool interleave = false;
- pe_resource_t *compatible_rsc = NULL;
+ pcmk_resource_t *compatible_rsc = NULL;
// Validate that this is a resource probe followed by some action
if ((after == NULL) || (probe == NULL) || (probe->rsc == NULL)
- || (probe->rsc->variant != pe_native)
- || !pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
+ || (probe->rsc->variant != pcmk_rsc_variant_primitive)
+ || !pcmk__str_eq(probe->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
return;
}
// Avoid running into any possible loop
- if (pcmk_is_set(after->flags, pe_action_tracking)) {
+ if (pcmk_is_set(after->flags, pcmk_action_detect_loop)) {
return;
}
- pe__set_action_flags(after, pe_action_tracking);
+ pe__set_action_flags(after, pcmk_action_detect_loop);
crm_trace("Adding probe restart orderings for '%s@%s then %s@%s'",
probe->uuid, pe__node_name(probe->node),
@@ -556,26 +563,28 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
/* Add restart orderings if "then" is for a different primitive.
* Orderings for collective resources will be added later.
*/
- if ((after->rsc != NULL) && (after->rsc->variant == pe_native)
+ if ((after->rsc != NULL)
+ && (after->rsc->variant == pcmk_rsc_variant_primitive)
&& (probe->rsc != after->rsc)) {
GList *then_actions = NULL;
- if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
- then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP,
- FALSE);
+ if (pcmk__str_eq(after->task, PCMK_ACTION_START, pcmk__str_none)) {
+ then_actions = pe__resource_actions(after->rsc, NULL,
+ PCMK_ACTION_STOP, FALSE);
- } else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(after->task, PCMK_ACTION_PROMOTE,
+ pcmk__str_none)) {
then_actions = pe__resource_actions(after->rsc, NULL,
- RSC_DEMOTE, FALSE);
+ PCMK_ACTION_DEMOTE, FALSE);
}
for (iter = then_actions; iter != NULL; iter = iter->next) {
- pe_action_t *then = (pe_action_t *) iter->data;
+ pcmk_action_t *then = (pcmk_action_t *) iter->data;
// Skip pseudo-actions (for example, those implied by fencing)
- if (!pcmk_is_set(then->flags, pe_action_pseudo)) {
- order_actions(probe, then, pe_order_optional);
+ if (!pcmk_is_set(then->flags, pcmk_action_pseudo)) {
+ order_actions(probe, then, pcmk__ar_ordered);
}
}
g_list_free(then_actions);
@@ -585,7 +594,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* to add orderings only for the relevant instance.
*/
if ((after->rsc != NULL)
- && (after->rsc->variant > pe_group)) {
+ && (after->rsc->variant > pcmk_rsc_variant_group)) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
XML_RSC_ATTR_INTERLEAVE);
@@ -593,7 +602,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
if (interleave) {
compatible_rsc = pcmk__find_compatible_instance(probe->rsc,
after->rsc,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
false);
}
}
@@ -603,29 +612,30 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* ordered before its individual instances' actions.
*/
for (iter = after->actions_after; iter != NULL; iter = iter->next) {
- pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) iter->data;
+ pcmk__related_action_t *after_wrapper = iter->data;
- /* pe_order_implies_then is the reason why a required A.start
+ /* pcmk__ar_first_implies_then is the reason why a required A.start
* implies/enforces B.start to be required too, which is the cause of
* B.restart/re-promote.
*
- * Not sure about pe_order_implies_then_on_node though. It's now only
- * used for unfencing case, which tends to introduce transition
+ * Not sure about pcmk__ar_first_implies_same_node_then though. It's now
+ * only used for unfencing case, which tends to introduce transition
* loops...
*/
- if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
+ if (!pcmk_is_set(after_wrapper->type, pcmk__ar_first_implies_then)) {
/* The order type between a group/clone and its child such as
* B.start-> B_child.start is:
- * pe_order_implies_first_printed | pe_order_runnable_left
+ * pcmk__ar_then_implies_first_graphed
+ * |pcmk__ar_unrunnable_first_blocks
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if ((after->rsc == NULL)
- || (after->rsc->variant < pe_group)
+ || (after->rsc->variant < pcmk_rsc_variant_group)
|| (probe->rsc->parent == after->rsc)
|| (after_wrapper->action->rsc == NULL)
- || (after_wrapper->action->rsc->variant > pe_group)
+ || (after_wrapper->action->rsc->variant > pcmk_rsc_variant_group)
|| (after->rsc != after_wrapper->action->rsc->parent)) {
continue;
}
@@ -633,7 +643,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
- if ((after->rsc->variant > pe_group) && interleave
+ if ((after->rsc->variant > pcmk_rsc_variant_group) && interleave
&& ((compatible_rsc == NULL)
|| (compatible_rsc != after_wrapper->action->rsc))) {
continue;
@@ -647,7 +657,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
pe__node_name(after_wrapper->action->node),
after_wrapper->type);
- add_restart_orderings_for_probe(probe, after_wrapper->action, data_set);
+ add_restart_orderings_for_probe(probe, after_wrapper->action);
}
}
@@ -655,17 +665,15 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* \internal
* \brief Clear the tracking flag on all scheduled actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-clear_actions_tracking_flag(pe_working_set_t *data_set)
+clear_actions_tracking_flag(pcmk_scheduler_t *scheduler)
{
- GList *gIter = NULL;
-
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
- pe__clear_action_flags(action, pe_action_tracking);
+ pe__clear_action_flags(action, pcmk_action_detect_loop);
}
}
@@ -673,37 +681,37 @@ clear_actions_tracking_flag(pe_working_set_t *data_set)
* \internal
* \brief Add start and restart orderings for probes scheduled for a resource
*
- * \param[in,out] rsc Resource whose probes should be ordered
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource whose probes should be ordered
+ * \param[in] user_data Unused
*/
static void
-add_start_restart_orderings_for_rsc(pe_resource_t *rsc,
- pe_working_set_t *data_set)
+add_start_restart_orderings_for_rsc(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
GList *probes = NULL;
// For collective resources, order each instance recursively
- if (rsc->variant != pe_native) {
- g_list_foreach(rsc->children,
- (GFunc) add_start_restart_orderings_for_rsc, data_set);
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
+ g_list_foreach(rsc->children, add_start_restart_orderings_for_rsc,
+ NULL);
return;
}
// Find all probes for given resource
- probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
// Add probe restart orderings for each probe found
for (GList *iter = probes; iter != NULL; iter = iter->next) {
- pe_action_t *probe = (pe_action_t *) iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) iter->data;
for (GList *then_iter = probe->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
+ pcmk__related_action_t *then = then_iter->data;
add_start_orderings_for_probe(probe, then);
- add_restart_orderings_for_probe(probe, then->action, data_set);
- clear_actions_tracking_flag(data_set);
+ add_restart_orderings_for_probe(probe, then->action);
+ clear_actions_tracking_flag(rsc->cluster);
}
}
@@ -714,12 +722,12 @@ add_start_restart_orderings_for_rsc(pe_resource_t *rsc,
* \internal
* \brief Add "A then probe B" orderings for "A then B" orderings
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This function is currently disabled (see next comment).
*/
static void
-order_then_probes(pe_working_set_t *data_set)
+order_then_probes(pcmk_scheduler_t *scheduler)
{
#if 0
/* Given an ordering "A then B", we would prefer to wait for A to be started
@@ -751,14 +759,14 @@ order_then_probes(pe_working_set_t *data_set)
* narrowing use case suggests that this code should remain disabled until
* someone gets smarter.
*/
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
GList *actions = NULL;
GList *probes = NULL;
- actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
+ actions = pe__resource_actions(rsc, NULL, PCMK_ACTION_START, FALSE);
if (actions) {
start = actions->data;
@@ -770,22 +778,22 @@ order_then_probes(pe_working_set_t *data_set)
continue;
}
- probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
for (actions = start->actions_before; actions != NULL;
actions = actions->next) {
- pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
+ pcmk__related_action_t *before = actions->data;
- pe_action_t *first = before->action;
- pe_resource_t *first_rsc = first->rsc;
+ pcmk_action_t *first = before->action;
+ pcmk_resource_t *first_rsc = first->rsc;
if (first->required_runnable_before) {
for (GList *clone_actions = first->actions_before;
clone_actions != NULL;
clone_actions = clone_actions->next) {
- before = (pe_action_wrapper_t *) clone_actions->data;
+ before = clone_actions->data;
crm_trace("Testing '%s then %s' for %s",
first->uuid, before->action->uuid, start->uuid);
@@ -795,7 +803,8 @@ order_then_probes(pe_working_set_t *data_set)
break;
}
- } else if (!pcmk__str_eq(first->task, RSC_START, pcmk__str_none)) {
+ } else if (!pcmk__str_eq(first->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
@@ -819,10 +828,10 @@ order_then_probes(pe_working_set_t *data_set)
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
- pe_action_t *probe = (pe_action_t *) probe_iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
- order_actions(first, probe, pe_order_optional);
+ order_actions(first, probe, pcmk__ar_ordered);
}
}
}
@@ -830,35 +839,35 @@ order_then_probes(pe_working_set_t *data_set)
}
void
-pcmk__order_probes(pe_working_set_t *data_set)
+pcmk__order_probes(pcmk_scheduler_t *scheduler)
{
// Add orderings for "probe then X"
- g_list_foreach(data_set->resources,
- (GFunc) add_start_restart_orderings_for_rsc, data_set);
- add_probe_orderings_for_stops(data_set);
+ g_list_foreach(scheduler->resources, add_start_restart_orderings_for_rsc,
+ NULL);
+ add_probe_orderings_for_stops(scheduler);
- order_then_probes(data_set);
+ order_then_probes(scheduler);
}
/*!
* \internal
* \brief Schedule any probes needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing of failed remote nodes.
*/
void
-pcmk__schedule_probes(pe_working_set_t *data_set)
+pcmk__schedule_probes(pcmk_scheduler_t *scheduler)
{
// Schedule probes on each node in the cluster as needed
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
const char *probed = NULL;
if (!node->details->online) { // Don't probe offline nodes
if (pcmk__is_failed_remote_node(node)) {
- pe_fence_node(data_set, node,
+ pe_fence_node(scheduler, node,
"the connection is unrecoverable", FALSE);
}
continue;
@@ -878,19 +887,18 @@ pcmk__schedule_probes(pe_working_set_t *data_set)
*/
probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
if (probed != NULL && crm_is_true(probed) == FALSE) {
- pe_action_t *probe_op = NULL;
+ pcmk_action_t *probe_op = NULL;
probe_op = custom_action(NULL,
crm_strdup_printf("%s-%s", CRM_OP_REPROBE,
node->details->uname),
- CRM_OP_REPROBE, node, FALSE, TRUE,
- data_set);
+ CRM_OP_REPROBE, node, FALSE, scheduler);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT,
XML_BOOLEAN_TRUE);
continue;
}
// Probe each resource in the cluster on this node, as needed
- pcmk__probe_resource_list(data_set->resources, node);
+ pcmk__probe_resource_list(scheduler->resources, node);
}
}
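As a hedged, illustrative sketch only (the helper name below is made up; the call itself mirrors the orderings already shown in this file and assumes libpacemaker's internal headers), the renamed-API idiom these hunks converge on looks like:

    /* Illustrative sketch, not part of this patch: order one resource's probe
     * before another resource's start using the renamed task constants
     * (PCMK_ACTION_*) and ordering flags (pcmk__ar_*).
     */
    static void
    order_probe_before_start(pcmk_resource_t *first_rsc, pcmk_resource_t *then_rsc)
    {
        pcmk__new_ordering(first_rsc,
                           pcmk__op_key(first_rsc->id, PCMK_ACTION_MONITOR, 0),
                           NULL,
                           then_rsc,
                           pcmk__op_key(then_rsc->id, PCMK_ACTION_START, 0),
                           NULL,
                           pcmk__ar_ordered, first_rsc->cluster);
    }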
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index d12d017..7f81a13 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -23,19 +23,22 @@
* \param[in,out] last Previous instance ordered (NULL if \p child is first)
*/
static void
-order_instance_promotion(pe_resource_t *clone, pe_resource_t *child,
- pe_resource_t *last)
+order_instance_promotion(pcmk_resource_t *clone, pcmk_resource_t *child,
+ pcmk_resource_t *last)
{
// "Promote clone" -> promote instance -> "clone promoted"
- pcmk__order_resource_actions(clone, RSC_PROMOTE, child, RSC_PROMOTE,
- pe_order_optional);
- pcmk__order_resource_actions(child, RSC_PROMOTE, clone, RSC_PROMOTED,
- pe_order_optional);
+ pcmk__order_resource_actions(clone, PCMK_ACTION_PROMOTE,
+ child, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(child, PCMK_ACTION_PROMOTE,
+ clone, PCMK_ACTION_PROMOTED,
+ pcmk__ar_ordered);
// If clone is ordered, order this instance relative to last
if ((last != NULL) && pe__clone_is_ordered(clone)) {
- pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(last, PCMK_ACTION_PROMOTE,
+ child, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
}
}
@@ -48,19 +51,21 @@ order_instance_promotion(pe_resource_t *clone, pe_resource_t *child,
* \param[in] last Previous instance ordered (NULL if \p child is first)
*/
static void
-order_instance_demotion(pe_resource_t *clone, pe_resource_t *child,
- pe_resource_t *last)
+order_instance_demotion(pcmk_resource_t *clone, pcmk_resource_t *child,
+ pcmk_resource_t *last)
{
// "Demote clone" -> demote instance -> "clone demoted"
- pcmk__order_resource_actions(clone, RSC_DEMOTE, child, RSC_DEMOTE,
- pe_order_implies_first_printed);
- pcmk__order_resource_actions(child, RSC_DEMOTE, clone, RSC_DEMOTED,
- pe_order_implies_then_printed);
+ pcmk__order_resource_actions(clone, PCMK_ACTION_DEMOTE, child,
+ PCMK_ACTION_DEMOTE,
+ pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE,
+ clone, PCMK_ACTION_DEMOTED,
+ pcmk__ar_first_implies_then_graphed);
// If clone is ordered, order this instance relative to last
if ((last != NULL) && pe__clone_is_ordered(clone)) {
- pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE, last,
+ PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
}
}
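Note the deliberate asymmetry between the two helpers above for ordered clones: promotions are chained in instance order (previous instance, then this one), while demotions are chained in reverse (this instance, then the previous one). A hedged sketch of just those two calls, reusing the `child`/`last` instance pointers from the code above:

    /* Illustrative only: chained ordering for an ordered promotable clone */
    pcmk__order_resource_actions(last, PCMK_ACTION_PROMOTE,    // promote previous
                                 child, PCMK_ACTION_PROMOTE,   // before this one
                                 pcmk__ar_ordered);
    pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE,    // demote this one
                                 last, PCMK_ACTION_DEMOTE,     // before the previous
                                 pcmk__ar_ordered);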
@@ -73,32 +78,35 @@ order_instance_demotion(pe_resource_t *clone, pe_resource_t *child,
* \param[out] promoting If \p rsc will be promoted, this will be set to true
*/
static void
-check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting)
+check_for_role_change(const pcmk_resource_t *rsc, bool *demoting,
+ bool *promoting)
{
const GList *iter = NULL;
// If this is a cloned group, check group members recursively
if (rsc->children != NULL) {
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- check_for_role_change((const pe_resource_t *) iter->data,
+ check_for_role_change((const pcmk_resource_t *) iter->data,
demoting, promoting);
}
return;
}
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (*promoting && *demoting) {
return;
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue;
- } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_DEMOTE, action->task,
+ pcmk__str_none)) {
*demoting = true;
- } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_PROMOTE, action->task,
+ pcmk__str_none)) {
*promoting = true;
}
}
@@ -117,28 +125,29 @@ check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting)
* \param[in] chosen Node where \p child will be placed
*/
static void
-apply_promoted_locations(pe_resource_t *child,
+apply_promoted_locations(pcmk_resource_t *child,
const GList *location_constraints,
- const pe_node_t *chosen)
+ const pcmk_node_t *chosen)
{
for (const GList *iter = location_constraints; iter; iter = iter->next) {
const pe__location_t *location = iter->data;
- pe_node_t *weighted_node = NULL;
+ const pcmk_node_t *constraint_node = NULL;
- if (location->role_filter == RSC_ROLE_PROMOTED) {
- weighted_node = pe_find_node_id(location->node_list_rh,
- chosen->details->id);
+ if (location->role_filter == pcmk_role_promoted) {
+ constraint_node = pe_find_node_id(location->node_list_rh,
+ chosen->details->id);
}
- if (weighted_node != NULL) {
+ if (constraint_node != NULL) {
int new_priority = pcmk__add_scores(child->priority,
- weighted_node->weight);
+ constraint_node->weight);
pe_rsc_trace(child,
"Applying location %s to %s promotion priority on %s: "
"%s + %s = %s",
- location->id, child->id, pe__node_name(weighted_node),
+ location->id, child->id,
+ pe__node_name(constraint_node),
pcmk_readable_score(child->priority),
- pcmk_readable_score(weighted_node->weight),
+ pcmk_readable_score(constraint_node->weight),
pcmk_readable_score(new_priority));
child->priority = new_priority;
}
@@ -153,16 +162,16 @@ apply_promoted_locations(pe_resource_t *child,
*
* \return Node that \p rsc will be promoted on, or NULL if none
*/
-static pe_node_t *
-node_to_be_promoted_on(const pe_resource_t *rsc)
+static pcmk_node_t *
+node_to_be_promoted_on(const pcmk_resource_t *rsc)
{
- pe_node_t *node = NULL;
- pe_node_t *local_node = NULL;
- const pe_resource_t *parent = NULL;
+ pcmk_node_t *node = NULL;
+ pcmk_node_t *local_node = NULL;
+ const pcmk_resource_t *parent = NULL;
// If this is a cloned group, bail if any group member can't be promoted
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (node_to_be_promoted_on(child) == NULL) {
pe_rsc_trace(rsc,
@@ -178,8 +187,8 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
rsc->id);
return NULL;
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted) {
crm_notice("Unmanaged instance %s will be left promoted on %s",
rsc->id, pe__node_name(node));
} else {
@@ -202,14 +211,14 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
}
parent = pe__const_top_resource(rsc, false);
- local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
+ local_node = g_hash_table_lookup(parent->allowed_nodes, node->details->id);
if (local_node == NULL) {
- /* It should not be possible for the scheduler to have allocated the
+ /* It should not be possible for the scheduler to have assigned the
* instance to a node where its parent is not allowed, but it's good to
* have a fail-safe.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_warn("%s can't be promoted because %s is not allowed on %s "
"(scheduler bug?)",
rsc->id, parent->id, pe__node_name(node));
@@ -217,7 +226,7 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
return NULL;
} else if ((local_node->count >= pe__clone_promoted_node_max(parent))
- && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"%s can't be promoted because %s has "
"maximum promoted instances already",
@@ -242,11 +251,11 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
static gint
cmp_promotable_instance(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *rsc1 = (const pe_resource_t *) a;
- const pe_resource_t *rsc2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *rsc1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *rsc2 = (const pcmk_resource_t *) b;
- enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
- enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e role1 = pcmk_role_unknown;
+ enum rsc_role_e role2 = pcmk_role_unknown;
CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
@@ -288,23 +297,23 @@ cmp_promotable_instance(gconstpointer a, gconstpointer b)
/*!
* \internal
- * \brief Add a promotable clone instance's sort index to its node's weight
+ * \brief Add a promotable clone instance's sort index to its node's score
*
* Add a promotable clone instance's sort index (which sums its promotion
* preferences and scores of relevant location constraints for the promoted
- * role) to the node weight of the instance's allocated node.
+ * role) to the node score of the instance's assigned node.
*
* \param[in] data Promotable clone instance
* \param[in,out] user_data Clone parent of \p data
*/
static void
-add_sort_index_to_node_weight(gpointer data, gpointer user_data)
+add_sort_index_to_node_score(gpointer data, gpointer user_data)
{
- const pe_resource_t *child = (const pe_resource_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) data;
+ pcmk_resource_t *clone = (pcmk_resource_t *) user_data;
- pe_node_t *node = NULL;
- const pe_node_t *chosen = NULL;
+ pcmk_node_t *node = NULL;
+ const pcmk_node_t *chosen = NULL;
if (child->sort_index < 0) {
pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id);
@@ -317,8 +326,7 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
return;
}
- node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes,
- chosen->details->id);
+ node = g_hash_table_lookup(clone->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
node->weight = pcmk__add_scores(child->sort_index, node->weight);
@@ -330,7 +338,7 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
/*!
* \internal
- * \brief Apply colocation to dependent's node weights if for promoted role
+ * \brief Apply colocation to dependent's node scores if for promoted role
*
* \param[in,out] data Colocation constraint to apply
* \param[in,out] user_data Promotable clone that is constraint's dependent
@@ -338,31 +346,30 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
static void
apply_coloc_to_dependent(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
- pe_resource_t *primary = constraint->primary;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *clone = user_data;
+ pcmk_resource_t *primary = colocation->primary;
uint32_t flags = pcmk__coloc_select_default;
- float factor = constraint->score / (float) INFINITY;
+ float factor = colocation->score / (float) INFINITY;
- if (constraint->dependent_role != RSC_ROLE_PROMOTED) {
+ if (colocation->dependent_role != pcmk_role_promoted) {
return;
}
- if (constraint->score < INFINITY) {
+ if (colocation->score < INFINITY) {
flags = pcmk__coloc_select_active;
}
pe_rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s",
- constraint->id, constraint->dependent->id,
- constraint->primary->id,
- pcmk_readable_score(constraint->score));
- primary->cmds->add_colocated_node_scores(primary, clone->id,
- &clone->allowed_nodes,
- constraint->node_attribute, factor,
- flags);
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id,
+ pcmk_readable_score(colocation->score));
+ primary->cmds->add_colocated_node_scores(primary, clone, clone->id,
+ &clone->allowed_nodes, colocation,
+ factor, flags);
}
/*!
* \internal
- * \brief Apply colocation to primary's node weights if for promoted role
+ * \brief Apply colocation to primary's node scores if for promoted role
*
* \param[in,out] data Colocation constraint to apply
* \param[in,out] user_data Promotable clone that is constraint's primary
@@ -370,45 +377,44 @@ apply_coloc_to_dependent(gpointer data, gpointer user_data)
static void
apply_coloc_to_primary(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
- pe_resource_t *dependent = constraint->dependent;
- const float factor = constraint->score / (float) INFINITY;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *clone = user_data;
+ pcmk_resource_t *dependent = colocation->dependent;
+ const float factor = colocation->score / (float) INFINITY;
const uint32_t flags = pcmk__coloc_select_active
|pcmk__coloc_select_nonnegative;
- if ((constraint->primary_role != RSC_ROLE_PROMOTED)
- || !pcmk__colocation_has_influence(constraint, NULL)) {
+ if ((colocation->primary_role != pcmk_role_promoted)
+ || !pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
pe_rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s",
- constraint->id, constraint->dependent->id,
- constraint->primary->id,
- pcmk_readable_score(constraint->score));
- dependent->cmds->add_colocated_node_scores(dependent, clone->id,
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id,
+ pcmk_readable_score(colocation->score));
+ dependent->cmds->add_colocated_node_scores(dependent, clone, clone->id,
&clone->allowed_nodes,
- constraint->node_attribute,
- factor, flags);
+ colocation, factor, flags);
}
/*!
* \internal
- * \brief Set clone instance's sort index to its node's weight
+ * \brief Set clone instance's sort index to its node's score
*
* \param[in,out] data Promotable clone instance
* \param[in] user_data Parent clone of \p data
*/
static void
-set_sort_index_to_node_weight(gpointer data, gpointer user_data)
+set_sort_index_to_node_score(gpointer data, gpointer user_data)
{
- pe_resource_t *child = (pe_resource_t *) data;
- const pe_resource_t *clone = (const pe_resource_t *) user_data;
+ pcmk_resource_t *child = (pcmk_resource_t *) data;
+ const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
- pe_node_t *chosen = child->fns->location(child, NULL, FALSE);
+ pcmk_node_t *chosen = child->fns->location(child, NULL, FALSE);
- if (!pcmk_is_set(child->flags, pe_rsc_managed)
- && (child->next_role == RSC_ROLE_PROMOTED)) {
+ if (!pcmk_is_set(child->flags, pcmk_rsc_managed)
+ && (child->next_role == pcmk_role_promoted)) {
child->sort_index = INFINITY;
pe_rsc_trace(clone,
"Final sort index for %s is INFINITY (unmanaged promoted)",
@@ -416,18 +422,17 @@ set_sort_index_to_node_weight(gpointer data, gpointer user_data)
} else if ((chosen == NULL) || (child->sort_index < 0)) {
pe_rsc_trace(clone,
- "Final sort index for %s is %d (ignoring node weight)",
+ "Final sort index for %s is %d (ignoring node score)",
child->id, child->sort_index);
} else {
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = g_hash_table_lookup(clone->allowed_nodes,
+ chosen->details->id);
- node = pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
-
child->sort_index = node->weight;
pe_rsc_trace(clone,
- "Merging weights for %s: final sort index for %s is %d",
+ "Adding scores for %s: final sort index for %s is %d",
clone->id, child->id, child->sort_index);
}
}
@@ -439,44 +444,48 @@ set_sort_index_to_node_weight(gpointer data, gpointer user_data)
* \param[in,out] clone Promotable clone to sort
*/
static void
-sort_promotable_instances(pe_resource_t *clone)
+sort_promotable_instances(pcmk_resource_t *clone)
{
- if (pe__set_clone_flag(clone, pe__clone_promotion_constrained)
+ GList *colocations = NULL;
+
+ if (pe__set_clone_flag(clone, pcmk__clone_promotion_constrained)
== pcmk_rc_already) {
return;
}
- pe__set_resource_flags(clone, pe_rsc_merging);
+ pe__set_resource_flags(clone, pcmk_rsc_updating_nodes);
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
pe_rsc_trace(clone,
- "Merging weights for %s: initial sort index for %s is %d",
+ "Adding scores for %s: initial sort index for %s is %d",
clone->id, child->id, child->sort_index);
}
- pe__show_node_weights(true, clone, "Before", clone->allowed_nodes,
- clone->cluster);
+ pe__show_node_scores(true, clone, "Before", clone->allowed_nodes,
+ clone->cluster);
- /* Because the this_with_colocations() and with_this_colocations() methods
- * boil down to copies of rsc_cons and rsc_cons_lhs for clones, we can use
- * those here directly for efficiency.
- */
- g_list_foreach(clone->children, add_sort_index_to_node_weight, clone);
- g_list_foreach(clone->rsc_cons, apply_coloc_to_dependent, clone);
- g_list_foreach(clone->rsc_cons_lhs, apply_coloc_to_primary, clone);
+ g_list_foreach(clone->children, add_sort_index_to_node_score, clone);
+
+ colocations = pcmk__this_with_colocations(clone);
+ g_list_foreach(colocations, apply_coloc_to_dependent, clone);
+ g_list_free(colocations);
+
+ colocations = pcmk__with_this_colocations(clone);
+ g_list_foreach(colocations, apply_coloc_to_primary, clone);
+ g_list_free(colocations);
// Ban resource from all nodes if it needs a ticket but doesn't have it
pcmk__require_promotion_tickets(clone);
- pe__show_node_weights(true, clone, "After", clone->allowed_nodes,
- clone->cluster);
+ pe__show_node_scores(true, clone, "After", clone->allowed_nodes,
+ clone->cluster);
- // Reset sort indexes to final node weights
- g_list_foreach(clone->children, set_sort_index_to_node_weight, clone);
+ // Reset sort indexes to final node scores
+ g_list_foreach(clone->children, set_sort_index_to_node_score, clone);
// Finally, sort instances in descending order of promotion priority
clone->children = g_list_sort(clone->children, cmp_promotable_instance);
- pe__clear_resource_flags(clone, pe_rsc_merging);
+ pe__clear_resource_flags(clone, pcmk_rsc_updating_nodes);
}
/*!
@@ -489,17 +498,18 @@ sort_promotable_instances(pe_resource_t *clone)
*
 * \return Active instance of \p clone with ID \p id on \p node if any,
 *         otherwise NULL
*/
-static pe_resource_t *
-find_active_anon_instance(const pe_resource_t *clone, const char *id,
- const pe_node_t *node)
+static pcmk_resource_t *
+find_active_anon_instance(const pcmk_resource_t *clone, const char *id,
+ const pcmk_node_t *node)
{
for (GList *iter = clone->children; iter; iter = iter->next) {
- pe_resource_t *child = iter->data;
- pe_resource_t *active = NULL;
+ pcmk_resource_t *child = iter->data;
+ pcmk_resource_t *active = NULL;
// Use ->find_rsc() in case this is a cloned group
active = clone->fns->find_rsc(child, id, node,
- pe_find_clone|pe_find_current);
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node);
if (active != NULL) {
return active;
}
@@ -518,16 +528,17 @@ find_active_anon_instance(const pe_resource_t *clone, const char *id,
* otherwise false
*/
static bool
-anonymous_known_on(const pe_resource_t *clone, const char *id,
- const pe_node_t *node)
+anonymous_known_on(const pcmk_resource_t *clone, const char *id,
+ const pcmk_node_t *node)
{
for (GList *iter = clone->children; iter; iter = iter->next) {
- pe_resource_t *child = iter->data;
+ pcmk_resource_t *child = iter->data;
/* Use ->find_rsc() because this might be a cloned group, and knowing
* that other members of the group are known here implies nothing.
*/
- child = clone->fns->find_rsc(child, id, NULL, pe_find_clone);
+ child = clone->fns->find_rsc(child, id, NULL,
+ pcmk_rsc_match_clone_only);
CRM_LOG_ASSERT(child != NULL);
if (child != NULL) {
if (g_hash_table_lookup(child->known_on, node->details->id)) {
@@ -548,10 +559,10 @@ anonymous_known_on(const pe_resource_t *clone, const char *id,
* \return true if \p node is allowed to run \p rsc, otherwise false
*/
static bool
-is_allowed(const pe_resource_t *rsc, const pe_node_t *node)
+is_allowed(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id);
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
return (allowed != NULL) && (allowed->weight >= 0);
}
@@ -566,15 +577,15 @@ is_allowed(const pe_resource_t *rsc, const pe_node_t *node)
* otherwise false
*/
static bool
-promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node)
+promotion_score_applies(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
char *id = clone_strip(rsc->id);
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
- pe_resource_t *active = NULL;
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
+ pcmk_resource_t *active = NULL;
const char *reason = "allowed";
// Some checks apply only to anonymous clone instances
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// If instance is active on the node, its score definitely applies
active = find_active_anon_instance(parent, id, node);
@@ -604,7 +615,7 @@ promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node)
/* Otherwise, we've probed and/or started the resource *somewhere*, so
* consider promotion scores on nodes where we know the status.
*/
- if ((pe_hash_table_lookup(rsc->known_on, node->details->id) != NULL)
+ if ((g_hash_table_lookup(rsc->known_on, node->details->id) != NULL)
|| (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) {
reason = "known";
} else {
@@ -640,16 +651,20 @@ check_allowed:
* \return Value of promotion score node attribute for \p rsc on \p node
*/
static const char *
-promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node,
+promotion_attr_value(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *name)
{
char *attr_name = NULL;
const char *attr_value = NULL;
+ enum pcmk__rsc_node node_type = pcmk__rsc_node_assigned;
- CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL);
-
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
+ // Not assigned yet
+ node_type = pcmk__rsc_node_current;
+ }
attr_name = pcmk_promotion_score_name(name);
- attr_value = pe_node_attribute_calculated(node, attr_name, rsc);
+ attr_value = pe__node_attribute_calculated(node, attr_name, rsc, node_type,
+ false);
free(attr_name);
return attr_value;
}
@@ -665,7 +680,7 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node,
* \return Promotion score for \p rsc on \p node (or 0 if none)
*/
static int
-promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
+promotion_score(const pcmk_resource_t *rsc, const pcmk_node_t *node,
bool *is_default)
{
char *name = NULL;
@@ -686,7 +701,7 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
bool child_default = false;
int child_score = promotion_score(child, node, &child_default);
@@ -712,7 +727,7 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
if (attr_value != NULL) {
pe_rsc_trace(rsc, "Promotion score for %s on %s = %s",
name, pe__node_name(node), pcmk__s(attr_value, "(unset)"));
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
/* If we don't have any resource history yet, we won't have clone_name.
* In that case, for anonymous clones, try the resource name without
* any instance number.
@@ -739,22 +754,23 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
/*!
* \internal
- * \brief Include promotion scores in instances' node weights and priorities
+ * \brief Include promotion scores in instances' node scores and priorities
*
* \param[in,out] rsc Promotable clone resource to update
*/
void
-pcmk__add_promotion_scores(pe_resource_t *rsc)
+pcmk__add_promotion_scores(pcmk_resource_t *rsc)
{
- if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) {
+ if (pe__set_clone_flag(rsc,
+ pcmk__clone_promotion_added) == pcmk_rc_already) {
return;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
int score, new_score;
g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
@@ -800,11 +816,11 @@ pcmk__add_promotion_scores(pe_resource_t *rsc)
static void
set_current_role_unpromoted(void *data, void *user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
- if (rsc->role == RSC_ROLE_STARTED) {
+ if (rsc->role == pcmk_role_started) {
// Promotable clones should use unpromoted role instead of started
- rsc->role = RSC_ROLE_UNPROMOTED;
+ rsc->role = pcmk_role_unpromoted;
}
g_list_foreach(rsc->children, set_current_role_unpromoted, NULL);
}
@@ -819,14 +835,14 @@ set_current_role_unpromoted(void *data, void *user_data)
static void
set_next_role_unpromoted(void *data, void *user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
GList *assigned = NULL;
rsc->fns->location(rsc, &assigned, FALSE);
if (assigned == NULL) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "stopped instance");
+ pe__set_next_role(rsc, pcmk_role_stopped, "stopped instance");
} else {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "unpromoted instance");
+ pe__set_next_role(rsc, pcmk_role_unpromoted, "unpromoted instance");
g_list_free(assigned);
}
g_list_foreach(rsc->children, set_next_role_unpromoted, NULL);
@@ -842,10 +858,10 @@ set_next_role_unpromoted(void *data, void *user_data)
static void
set_next_role_promoted(void *data, gpointer user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
- if (rsc->next_role == RSC_ROLE_UNKNOWN) {
- pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance");
+ if (rsc->next_role == pcmk_role_unknown) {
+ pe__set_next_role(rsc, pcmk_role_promoted, "promoted instance");
}
g_list_foreach(rsc->children, set_next_role_promoted, NULL);
}
@@ -857,11 +873,11 @@ set_next_role_promoted(void *data, gpointer user_data)
* \param[in,out] instance Promotable clone instance to show
*/
static void
-show_promotion_score(pe_resource_t *instance)
+show_promotion_score(pcmk_resource_t *instance)
{
- pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE);
+ pcmk_node_t *chosen = instance->fns->location(instance, NULL, FALSE);
- if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores)
+ if (pcmk_is_set(instance->cluster->flags, pcmk_sched_output_scores)
&& !pcmk__is_daemon && (instance->cluster->priv != NULL)) {
pcmk__output_t *out = instance->cluster->priv;
@@ -888,16 +904,16 @@ show_promotion_score(pe_resource_t *instance)
static void
set_instance_priority(gpointer data, gpointer user_data)
{
- pe_resource_t *instance = (pe_resource_t *) data;
- const pe_resource_t *clone = (const pe_resource_t *) user_data;
- const pe_node_t *chosen = NULL;
- enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
+ pcmk_resource_t *instance = (pcmk_resource_t *) data;
+ const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
+ const pcmk_node_t *chosen = NULL;
+ enum rsc_role_e next_role = pcmk_role_unknown;
GList *list = NULL;
pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id,
role2text(instance->next_role));
- if (instance->fns->state(instance, TRUE) == RSC_ROLE_STARTED) {
+ if (instance->fns->state(instance, TRUE) == pcmk_role_started) {
set_current_role_unpromoted(instance, NULL);
}
@@ -914,8 +930,8 @@ set_instance_priority(gpointer data, gpointer user_data)
next_role = instance->fns->state(instance, FALSE);
switch (next_role) {
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNKNOWN:
+ case pcmk_role_started:
+ case pcmk_role_unknown:
// Set instance priority to its promotion score (or -1 if none)
{
bool is_default = false;
@@ -935,13 +951,13 @@ set_instance_priority(gpointer data, gpointer user_data)
}
break;
- case RSC_ROLE_UNPROMOTED:
- case RSC_ROLE_STOPPED:
+ case pcmk_role_unpromoted:
+ case pcmk_role_stopped:
// Instance can't be promoted
instance->priority = -INFINITY;
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
// Nothing needed (re-creating actions after scheduling fencing)
break;
@@ -964,7 +980,7 @@ set_instance_priority(gpointer data, gpointer user_data)
g_list_free(list);
instance->sort_index = instance->priority;
- if (next_role == RSC_ROLE_PROMOTED) {
+ if (next_role == pcmk_role_promoted) {
instance->sort_index = INFINITY;
}
pe_rsc_trace(clone, "Assigning %s priority = %d",
@@ -981,11 +997,11 @@ set_instance_priority(gpointer data, gpointer user_data)
static void
set_instance_role(gpointer data, gpointer user_data)
{
- pe_resource_t *instance = (pe_resource_t *) data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) data;
int *count = (int *) user_data;
- const pe_resource_t *clone = pe__const_top_resource(instance, false);
- pe_node_t *chosen = NULL;
+ const pcmk_resource_t *clone = pe__const_top_resource(instance, false);
+ pcmk_node_t *chosen = NULL;
show_promotion_score(instance);
@@ -994,7 +1010,7 @@ set_instance_role(gpointer data, gpointer user_data)
instance->id);
} else if ((*count < pe__clone_promoted_max(instance))
- || !pcmk_is_set(clone->flags, pe_rsc_managed)) {
+ || !pcmk_is_set(clone->flags, pcmk_rsc_managed)) {
chosen = node_to_be_promoted_on(instance);
}
@@ -1003,9 +1019,9 @@ set_instance_role(gpointer data, gpointer user_data)
return;
}
- if ((instance->role < RSC_ROLE_PROMOTED)
- && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum)
- && (instance->cluster->no_quorum_policy == no_quorum_freeze)) {
+ if ((instance->role < pcmk_role_promoted)
+ && !pcmk_is_set(instance->cluster->flags, pcmk_sched_quorate)
+ && (instance->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Clone instance %s cannot be promoted without quorum",
instance->id);
set_next_role_unpromoted(instance, NULL);
@@ -1027,13 +1043,13 @@ set_instance_role(gpointer data, gpointer user_data)
* \param[in,out] rsc Promotable clone resource to update
*/
void
-pcmk__set_instance_roles(pe_resource_t *rsc)
+pcmk__set_instance_roles(pcmk_resource_t *rsc)
{
int promoted = 0;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
- // Repurpose count to track the number of promoted instances allocated
+ // Repurpose count to track the number of promoted instances assigned
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
node->count = 0;
@@ -1059,11 +1075,11 @@ pcmk__set_instance_roles(pe_resource_t *rsc)
* \param[out] any_demoting Will be set true if any instance is demoting
*/
static void
-create_promotable_instance_actions(pe_resource_t *clone,
+create_promotable_instance_actions(pcmk_resource_t *clone,
bool *any_promoting, bool *any_demoting)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_for_role_change(instance, any_demoting, any_promoting);
@@ -1081,10 +1097,10 @@ create_promotable_instance_actions(pe_resource_t *clone,
* \param[in,out] clone Promotable clone to reset
*/
static void
-reset_instance_priorities(pe_resource_t *clone)
+reset_instance_priorities(pcmk_resource_t *clone)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->priority = clone->priority;
}
@@ -1097,7 +1113,7 @@ reset_instance_priorities(pe_resource_t *clone)
* \param[in,out] clone Promotable clone to create actions for
*/
void
-pcmk__create_promotable_actions(pe_resource_t *clone)
+pcmk__create_promotable_actions(pcmk_resource_t *clone)
{
bool any_promoting = false;
bool any_demoting = false;
@@ -1119,19 +1135,19 @@ pcmk__create_promotable_actions(pe_resource_t *clone)
* \param[in,out] clone Promotable clone instance to order
*/
void
-pcmk__order_promotable_instances(pe_resource_t *clone)
+pcmk__order_promotable_instances(pcmk_resource_t *clone)
{
- pe_resource_t *previous = NULL; // Needed for ordered clones
+ pcmk_resource_t *previous = NULL; // Needed for ordered clones
pcmk__promotable_restart_ordering(clone);
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
// Demote before promote
- pcmk__order_resource_actions(instance, RSC_DEMOTE,
- instance, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_DEMOTE,
+ instance, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
order_instance_promotion(clone, instance, previous);
order_instance_demotion(clone, instance, previous);
@@ -1144,29 +1160,26 @@ pcmk__order_promotable_instances(pe_resource_t *clone)
* \brief Update dependent's allowed nodes for colocation with promotable
*
* \param[in,out] dependent Dependent resource to update
+ * \param[in] primary Primary resource
* \param[in] primary_node Node where an instance of the primary will be
* \param[in] colocation Colocation constraint to apply
*/
static void
-update_dependent_allowed_nodes(pe_resource_t *dependent,
- const pe_node_t *primary_node,
+update_dependent_allowed_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk_node_t *primary_node,
const pcmk__colocation_t *colocation)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
const char *primary_value = NULL;
- const char *attr = NULL;
+ const char *attr = colocation->node_attribute;
if (colocation->score >= INFINITY) {
return; // Colocation is mandatory, so allowed node scores don't matter
}
- // Get value of primary's colocation node attribute
- attr = colocation->node_attribute;
- if (attr == NULL) {
- attr = CRM_ATTR_UNAME;
- }
- primary_value = pe_node_attribute_raw(primary_node, attr);
+ primary_value = pcmk__colocation_node_attr(primary_node, attr, primary);
pe_rsc_trace(colocation->primary,
"Applying %s (%s with %s on %s by %s @%d) to %s",
@@ -1176,7 +1189,8 @@ update_dependent_allowed_nodes(pe_resource_t *dependent,
g_hash_table_iter_init(&iter, dependent->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
- const char *dependent_value = pe_node_attribute_raw(node, attr);
+ const char *dependent_value = pcmk__colocation_node_attr(node, attr,
+ dependent);
if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) {
node->weight = pcmk__add_scores(node->weight, colocation->score);
@@ -1197,8 +1211,8 @@ update_dependent_allowed_nodes(pe_resource_t *dependent,
* \param[in] colocation Colocation constraint to apply
*/
void
-pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
- pe_resource_t *dependent,
+pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
const pcmk__colocation_t *colocation)
{
GList *affected_nodes = NULL;
@@ -1208,35 +1222,36 @@ pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
* each one.
*/
for (GList *iter = primary->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
- pe_node_t *node = instance->fns->location(instance, NULL, FALSE);
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+ pcmk_node_t *node = instance->fns->location(instance, NULL, FALSE);
if (node == NULL) {
continue;
}
if (instance->fns->state(instance, FALSE) == colocation->primary_role) {
- update_dependent_allowed_nodes(dependent, node, colocation);
+ update_dependent_allowed_nodes(dependent, primary, node,
+ colocation);
affected_nodes = g_list_prepend(affected_nodes, node);
}
}
- /* For mandatory colocations, add the primary's node weight to the
- * dependent's node weight for each affected node, and ban the dependent
+ /* For mandatory colocations, add the primary's node score to the
+ * dependent's node score for each affected node, and ban the dependent
* from all other nodes.
*
* However, skip this for promoted-with-promoted colocations, otherwise
* inactive dependent instances can't start (in the unpromoted role).
*/
if ((colocation->score >= INFINITY)
- && ((colocation->dependent_role != RSC_ROLE_PROMOTED)
- || (colocation->primary_role != RSC_ROLE_PROMOTED))) {
+ && ((colocation->dependent_role != pcmk_role_promoted)
+ || (colocation->primary_role != pcmk_role_promoted))) {
pe_rsc_trace(colocation->primary,
"Applying %s (mandatory %s with %s) to %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, dependent->id);
- node_list_exclude(dependent->allowed_nodes, affected_nodes,
- TRUE);
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ affected_nodes, true);
}
g_list_free(affected_nodes);
}
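
The optional-colocation branch above adds the colocation score only to those allowed nodes of the dependent whose value of the colocation node attribute matches the primary's node. A minimal, self-contained sketch of that idea follows; it is not libpacemaker code, and every name in it is an illustrative stand-in.

/* Illustrative sketch only; simplified stand-ins for Pacemaker structures. */
#include <stdio.h>
#include <string.h>

#define SKETCH_INFINITY 1000000

struct sketch_node {
    const char *name;
    const char *site;   /* stands in for the colocation node attribute */
    int score;          /* stands in for the node's assignment score */
};

/* Add the colocation score to every allowed node whose attribute value
 * matches the primary's node; mandatory (INFINITY) colocations are handled
 * separately, so skip them here.
 */
static void
apply_optional_colocation(struct sketch_node *allowed, int n_allowed,
                          const struct sketch_node *primary_node, int score)
{
    if (score >= SKETCH_INFINITY) {
        return; /* mandatory: allowed-node scores don't matter */
    }
    for (int i = 0; i < n_allowed; i++) {
        if (strcmp(allowed[i].site, primary_node->site) == 0) {
            allowed[i].score += score;
        }
    }
}

int
main(void)
{
    struct sketch_node primary_node = { "node1", "dc1", 0 };
    struct sketch_node allowed[] = {
        { "node1", "dc1", 10 }, { "node2", "dc1", 5 }, { "node3", "dc2", 20 },
    };

    apply_optional_colocation(allowed, 3, &primary_node, 50);
    for (int i = 0; i < 3; i++) {
        printf("%s: %d\n", allowed[i].name, allowed[i].score);
    }
    return 0;
}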
@@ -1250,11 +1265,11 @@ pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
* \param[in] colocation Colocation constraint to apply
*/
void
-pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
- pe_resource_t *dependent,
+pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *primary_instance = NULL;
+ pcmk_resource_t *primary_instance = NULL;
// Look for a primary instance where dependent will be
primary_instance = pcmk__find_compatible_instance(dependent, primary,
diff --git a/lib/pacemaker/pcmk_sched_recurring.c b/lib/pacemaker/pcmk_sched_recurring.c
index c1b929b..9210fab 100644
--- a/lib/pacemaker/pcmk_sched_recurring.c
+++ b/lib/pacemaker/pcmk_sched_recurring.c
@@ -12,6 +12,7 @@
#include <stdbool.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
@@ -24,7 +25,7 @@ struct op_history {
// Parsed information
char *key; // Operation key for action
- enum rsc_role_e role; // Action role (or RSC_ROLE_UNKNOWN for default)
+ enum rsc_role_e role; // Action role (or pcmk_role_unknown for default)
guint interval_ms; // Action interval
};
@@ -55,7 +56,7 @@ xe_interval(const xmlNode *xml)
* once in the operation history of \p rsc, otherwise false
*/
static bool
-is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
+is_op_dup(const pcmk_resource_t *rsc, const char *name, guint interval_ms)
{
const char *id = NULL;
@@ -63,8 +64,7 @@ is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
op != NULL; op = crm_next_same_xml(op)) {
// Check whether action name and interval match
- if (!pcmk__str_eq(crm_element_value(op, "name"),
- name, pcmk__str_none)
+ if (!pcmk__str_eq(crm_element_value(op, "name"), name, pcmk__str_none)
|| (xe_interval(op) != interval_ms)) {
continue;
}
@@ -104,9 +104,11 @@ is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
static bool
op_cannot_recur(const char *name)
{
- return pcmk__str_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE,
- CRMD_ACTION_RELOAD_AGENT, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL);
+ return pcmk__str_any_of(name, PCMK_ACTION_STOP, PCMK_ACTION_START,
+ PCMK_ACTION_DEMOTE, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ NULL);
}
/*!
@@ -120,7 +122,7 @@ op_cannot_recur(const char *name)
* \return true if \p xml is for a recurring action, otherwise false
*/
static bool
-is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
+is_recurring_history(const pcmk_resource_t *rsc, const xmlNode *xml,
struct op_history *op)
{
const char *role = NULL;
@@ -151,24 +153,28 @@ is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
// Ensure role is valid if specified
role = crm_element_value(xml, "role");
if (role == NULL) {
- op->role = RSC_ROLE_UNKNOWN;
+ op->role = pcmk_role_unknown;
} else {
op->role = text2role(role);
- if (op->role == RSC_ROLE_UNKNOWN) {
+ if (op->role == pcmk_role_unknown) {
pcmk__config_err("Ignoring %s because %s is not a valid role",
op->id, role);
+ return false;
}
}
- // Disabled resources don't get monitored
- op->key = pcmk__op_key(rsc->id, op->name, op->interval_ms);
- if (find_rsc_op_entry(rsc, op->key) == NULL) {
- crm_trace("Not creating recurring action %s for disabled resource %s",
- op->id, rsc->id);
- free(op->key);
+ // Only actions that are still configured and enabled matter
+ if (pcmk__find_action_config(rsc, op->name, op->interval_ms,
+ false) == NULL) {
+ pe_rsc_trace(rsc,
+ "Ignoring %s (%s-interval %s for %s) because it is "
+ "disabled or no longer in configuration",
+ op->id, pcmk__readable_interval(op->interval_ms), op->name,
+ rsc->id);
return false;
}
+ op->key = pcmk__op_key(rsc->id, op->name, op->interval_ms);
return true;
}
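
The history entry is keyed by resource, action name, and interval via pcmk__op_key(). Pacemaker operation keys take the form <resource>_<action>_<interval-in-ms>, for example "rsc1_monitor_10000". The stand-alone sketch below (not the real helper) shows that composition.

/* Illustrative sketch; the real implementation is pcmk__op_key(). */
#include <stdio.h>
#include <stdlib.h>

/* Build an operation key of the form <rsc>_<task>_<interval_ms>,
 * e.g. "rsc1_monitor_10000" for a 10-second monitor of rsc1.
 */
static char *
sketch_op_key(const char *rsc_id, const char *task, unsigned int interval_ms)
{
    int len = snprintf(NULL, 0, "%s_%s_%u", rsc_id, task, interval_ms);
    char *key = malloc(len + 1);

    if (key != NULL) {
        snprintf(key, len + 1, "%s_%s_%u", rsc_id, task, interval_ms);
    }
    return key;
}

int
main(void)
{
    char *key = sketch_op_key("rsc1", "monitor", 10000U);

    printf("%s\n", (key != NULL)? key : "(allocation failed)");
    free(key);
    return 0;
}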
@@ -184,9 +190,9 @@ is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
* \return true if recurring action should be optional, otherwise false
*/
static bool
-active_recurring_should_be_optional(const pe_resource_t *rsc,
- const pe_node_t *node, const char *key,
- pe_action_t *start)
+active_recurring_should_be_optional(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const char *key,
+ pcmk_action_t *start)
{
GList *possible_matches = NULL;
@@ -197,7 +203,7 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
}
if (!pcmk_is_set(rsc->cmds->action_flags(start, NULL),
- pe_action_optional)) {
+ pcmk_action_optional)) {
pe_rsc_trace(rsc, "%s will be mandatory because %s is",
key, start->uuid);
return false;
@@ -213,9 +219,9 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
for (const GList *iter = possible_matches;
iter != NULL; iter = iter->next) {
- const pe_action_t *op = (const pe_action_t *) iter->data;
+ const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
- if (pcmk_is_set(op->flags, pe_action_reschedule)) {
+ if (pcmk_is_set(op->flags, pcmk_action_reschedule)) {
pe_rsc_trace(rsc,
"%s will be mandatory because "
"it needs to be rescheduled", key);
@@ -238,43 +244,43 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
* \param[in] op Resource history entry
*/
static void
-recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
- const pe_node_t *node, const struct op_history *op)
+recurring_op_for_active(pcmk_resource_t *rsc, pcmk_action_t *start,
+ const pcmk_node_t *node, const struct op_history *op)
{
- pe_action_t *mon = NULL;
+ pcmk_action_t *mon = NULL;
bool is_optional = true;
+ const bool is_default_role = (op->role == pcmk_role_unknown);
// We're only interested in recurring actions for active roles
- if (op->role == RSC_ROLE_STOPPED) {
+ if (op->role == pcmk_role_stopped) {
return;
}
is_optional = active_recurring_should_be_optional(rsc, node, op->key,
start);
- if (((op->role != RSC_ROLE_UNKNOWN) && (rsc->next_role != op->role))
- || ((op->role == RSC_ROLE_UNKNOWN)
- && (rsc->next_role == RSC_ROLE_PROMOTED))) {
+ if ((!is_default_role && (rsc->next_role != op->role))
+ || (is_default_role && (rsc->next_role == pcmk_role_promoted))) {
// Configured monitor role doesn't match role resource will have
if (is_optional) { // It's running, so cancel it
char *after_key = NULL;
- pe_action_t *cancel_op = pcmk__new_cancel_action(rsc, op->name,
- op->interval_ms,
- node);
+ pcmk_action_t *cancel_op = pcmk__new_cancel_action(rsc, op->name,
+ op->interval_ms,
+ node);
switch (rsc->role) {
- case RSC_ROLE_UNPROMOTED:
- case RSC_ROLE_STARTED:
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ case pcmk_role_unpromoted:
+ case pcmk_role_started:
+ if (rsc->next_role == pcmk_role_promoted) {
after_key = promote_key(rsc);
- } else if (rsc->next_role == RSC_ROLE_STOPPED) {
+ } else if (rsc->next_role == pcmk_role_stopped) {
after_key = stop_key(rsc);
}
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
after_key = demote_key(rsc);
break;
default:
@@ -283,7 +289,8 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
if (after_key) {
pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks,
+ rsc->cluster);
}
}
@@ -291,7 +298,7 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
"%s recurring action %s because %s configured for %s role "
"(not %s)",
(is_optional? "Cancelling" : "Ignoring"), op->key, op->id,
- role2text((op->role == RSC_ROLE_UNKNOWN)? RSC_ROLE_UNPROMOTED : op->role),
+ role2text(is_default_role? pcmk_role_unpromoted : op->role),
role2text(rsc->next_role));
return;
}
@@ -302,51 +309,55 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
op->id, rsc->id, role2text(rsc->next_role),
pe__node_name(node));
- mon = custom_action(rsc, strdup(op->key), op->name, node, is_optional, TRUE,
+ mon = custom_action(rsc, strdup(op->key), op->name, node, is_optional,
rsc->cluster);
- if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
pe_rsc_trace(rsc, "%s is unrunnable because start is", mon->uuid);
- pe__clear_action_flags(mon, pe_action_runnable);
+ pe__clear_action_flags(mon, pcmk_action_runnable);
} else if ((node == NULL) || !node->details->online
|| node->details->unclean) {
pe_rsc_trace(rsc, "%s is unrunnable because no node is available",
mon->uuid);
- pe__clear_action_flags(mon, pe_action_runnable);
+ pe__clear_action_flags(mon, pcmk_action_runnable);
- } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
+ } else if (!pcmk_is_set(mon->flags, pcmk_action_optional)) {
pe_rsc_info(rsc, "Start %s-interval %s for %s on %s",
pcmk__readable_interval(op->interval_ms), mon->task,
rsc->id, pe__node_name(node));
}
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
pe__add_action_expected_result(mon, CRM_EX_PROMOTED);
}
// Order monitor relative to other actions
- if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((node == NULL) || pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, start_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
pcmk__new_ordering(rsc, reload_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
- pe_order_optional|pe_order_runnable_left,
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
- pe_order_optional|pe_order_runnable_left,
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
@@ -363,11 +374,11 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
* \param[in] interval_ms Action interval (in milliseconds)
*/
static void
-cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
- const char *name, guint interval_ms)
+cancel_if_running(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *key, const char *name, guint interval_ms)
{
GList *possible_matches = find_actions_exact(rsc->actions, key, node);
- pe_action_t *cancel_op = NULL;
+ pcmk_action_t *cancel_op = NULL;
if (possible_matches == NULL) {
return; // Recurring action isn't running on this node
@@ -377,8 +388,8 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
switch (rsc->next_role) {
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_started:
+ case pcmk_role_unpromoted:
/* Order starts after cancel. If the current role is
* stopped, this cancels the monitor before the resource
* starts; if the current role is started, then this cancels
@@ -386,14 +397,14 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
*/
pcmk__new_ordering(rsc, NULL, cancel_op,
rsc, start_key(rsc), NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks, rsc->cluster);
break;
default:
break;
}
pe_rsc_info(rsc,
"Cancelling %s-interval %s action for %s on %s because "
- "configured for " RSC_ROLE_STOPPED_S " role (not %s)",
+ "configured for " PCMK__ROLE_STOPPED " role (not %s)",
pcmk__readable_interval(interval_ms), name, rsc->id,
pe__node_name(node), role2text(rsc->next_role));
}
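
The log messages in these hunks render intervals with pcmk__readable_interval(). A rough, self-contained stand-in is sketched below; the real helper's exact output format may differ.

/* Rough stand-in for rendering an interval in a human-friendly form;
 * the real helper's exact formatting may differ.
 */
#include <stdio.h>

static void
sketch_readable_interval(unsigned int interval_ms, char *buf, size_t len)
{
    if (interval_ms == 0) {
        snprintf(buf, len, "0s");
    } else if ((interval_ms % 60000) == 0) {
        snprintf(buf, len, "%um", interval_ms / 60000);
    } else if ((interval_ms % 1000) == 0) {
        snprintf(buf, len, "%us", interval_ms / 1000);
    } else {
        snprintf(buf, len, "%ums", interval_ms);
    }
}

int
main(void)
{
    char buf[32];

    sketch_readable_interval(10000U, buf, sizeof(buf));
    printf("%s\n", buf);    /* prints "10s" */
    return 0;
}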
@@ -407,14 +418,14 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
* \param[in,out] action Action to order after probes of \p rsc on \p node
*/
static void
-order_after_probes(pe_resource_t *rsc, const pe_node_t *node,
- pe_action_t *action)
+order_after_probes(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *action)
{
- GList *probes = pe__resource_actions(rsc, node, RSC_STATUS, FALSE);
+ GList *probes = pe__resource_actions(rsc, node, PCMK_ACTION_MONITOR, FALSE);
for (GList *iter = probes; iter != NULL; iter = iter->next) {
- order_actions((pe_action_t *) iter->data, action,
- pe_order_runnable_left);
+ order_actions((pcmk_action_t *) iter->data, action,
+ pcmk__ar_unrunnable_first_blocks);
}
g_list_free(probes);
}
@@ -428,32 +439,33 @@ order_after_probes(pe_resource_t *rsc, const pe_node_t *node,
* \param[in,out] action Action to order after stops of \p rsc on \p node
*/
static void
-order_after_stops(pe_resource_t *rsc, const pe_node_t *node,
- pe_action_t *action)
+order_after_stops(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *action)
{
- GList *stop_ops = pe__resource_actions(rsc, node, RSC_STOP, TRUE);
+ GList *stop_ops = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, TRUE);
for (GList *iter = stop_ops; iter != NULL; iter = iter->next) {
- pe_action_t *stop = (pe_action_t *) iter->data;
+ pcmk_action_t *stop = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(stop->flags, pe_action_optional)
- && !pcmk_is_set(action->flags, pe_action_optional)
- && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(stop->flags, pcmk_action_optional)
+ && !pcmk_is_set(action->flags, pcmk_action_optional)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "%s optional on %s: unmanaged",
action->uuid, pe__node_name(node));
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
}
- if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(stop->flags, pcmk_action_runnable)) {
crm_debug("%s unrunnable on %s: stop is unrunnable",
action->uuid, pe__node_name(node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, stop_key(rsc), stop,
NULL, NULL, action,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
@@ -469,18 +481,18 @@ order_after_stops(pe_resource_t *rsc, const pe_node_t *node,
* \param[in] op Resource history entry
*/
static void
-recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
+recurring_op_for_inactive(pcmk_resource_t *rsc, const pcmk_node_t *node,
const struct op_history *op)
{
GList *possible_matches = NULL;
// We're only interested in recurring actions for the inactive role
- if (op->role != RSC_ROLE_STOPPED) {
+ if (op->role != pcmk_role_stopped) {
return;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
- crm_notice("Ignoring %s (recurring monitors for " RSC_ROLE_STOPPED_S
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
+ crm_notice("Ignoring %s (recurring monitors for " PCMK__ROLE_STOPPED
" role are not supported for anonymous clones)", op->id);
return; // @TODO add support
}
@@ -489,10 +501,10 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
"where it should not be running", op->id, rsc->id);
for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *stop_node = (pe_node_t *) iter->data;
+ pcmk_node_t *stop_node = (pcmk_node_t *) iter->data;
bool is_optional = true;
- pe_action_t *stopped_mon = NULL;
+ pcmk_action_t *stopped_mon = NULL;
// Cancel action on node where resource will be active
if ((node != NULL)
@@ -509,16 +521,16 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
pe_rsc_trace(rsc,
"Creating %s recurring action %s for %s (%s "
- RSC_ROLE_STOPPED_S " on %s)",
+ PCMK__ROLE_STOPPED " on %s)",
(is_optional? "optional" : "mandatory"),
op->key, op->id, rsc->id, pe__node_name(stop_node));
stopped_mon = custom_action(rsc, strdup(op->key), op->name, stop_node,
- is_optional, TRUE, rsc->cluster);
+ is_optional, rsc->cluster);
pe__add_action_expected_result(stopped_mon, CRM_EX_NOT_RUNNING);
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
order_after_probes(rsc, stop_node, stopped_mon);
}
@@ -530,13 +542,13 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
if (!stop_node->details->online || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s unrunnable on %s: node unavailable",
stopped_mon->uuid, pe__node_name(stop_node));
- pe__clear_action_flags(stopped_mon, pe_action_runnable);
+ pe__clear_action_flags(stopped_mon, pcmk_action_runnable);
}
- if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
- && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
+ if (pcmk_is_set(stopped_mon->flags, pcmk_action_runnable)
+ && !pcmk_is_set(stopped_mon->flags, pcmk_action_optional)) {
crm_notice("Start recurring %s-interval %s for "
- RSC_ROLE_STOPPED_S " %s on %s",
+ PCMK__ROLE_STOPPED " %s on %s",
pcmk__readable_interval(op->interval_ms),
stopped_mon->task, rsc->id, pe__node_name(stop_node));
}
@@ -550,17 +562,17 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
* \param[in,out] rsc Resource to create recurring actions for
*/
void
-pcmk__create_recurring_actions(pe_resource_t *rsc)
+pcmk__create_recurring_actions(pcmk_resource_t *rsc)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pe_rsc_trace(rsc, "Skipping recurring actions for blocked resource %s",
rsc->id);
return;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pe_rsc_trace(rsc, "Skipping recurring actions for %s "
"in maintenance mode", rsc->id);
return;
@@ -575,8 +587,8 @@ pcmk__create_recurring_actions(pe_resource_t *rsc)
"in maintenance mode",
rsc->id, pe__node_name(rsc->allocated_to));
- } else if ((rsc->next_role != RSC_ROLE_STOPPED)
- || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if ((rsc->next_role != pcmk_role_stopped)
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Recurring actions for active roles needed
start = start_action(rsc, rsc->allocated_to, TRUE);
}
@@ -612,11 +624,11 @@ pcmk__create_recurring_actions(pe_resource_t *rsc)
*
* \return Created op
*/
-pe_action_t *
-pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
- const pe_node_t *node)
+pcmk_action_t *
+pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, const pcmk_node_t *node)
{
- pe_action_t *cancel_op = NULL;
+ pcmk_action_t *cancel_op = NULL;
char *key = NULL;
char *interval_ms_s = NULL;
@@ -625,10 +637,10 @@ pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
// @TODO dangerous if possible to schedule another action with this key
key = pcmk__op_key(rsc->id, task, interval_ms);
- cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE,
+ cancel_op = custom_action(rsc, key, PCMK_ACTION_CANCEL, node, FALSE,
rsc->cluster);
- pcmk__str_update(&cancel_op->task, RSC_CANCEL);
+ pcmk__str_update(&cancel_op->task, PCMK_ACTION_CANCEL);
pcmk__str_update(&cancel_op->cancel_task, task);
interval_ms_s = crm_strdup_printf("%u", interval_ms);
@@ -648,14 +660,14 @@ pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
* \param[in] task Action name
* \param[in] interval_ms Action interval
* \param[in] node Node that history entry is for
- * \param[in] reason Short description of why action is being cancelled
+ * \param[in] reason Short description of why action is cancelled
*/
void
-pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
- guint interval_ms, const pe_node_t *node,
- const char *reason)
+pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
+ const char *task, guint interval_ms,
+ const pcmk_node_t *node, const char *reason)
{
- pe_action_t *cancel = NULL;
+ pcmk_action_t *cancel = NULL;
CRM_CHECK((rsc != NULL) && (task != NULL)
&& (node != NULL) && (reason != NULL),
@@ -669,12 +681,12 @@ pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
// Cancellations happen after stops
pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel,
- pe_order_optional, rsc->cluster);
+ pcmk__ar_ordered, rsc->cluster);
}
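
Helpers such as pcmk_is_set(), pe__set_action_flags(), and pe__clear_action_flags(), used throughout these hunks, are plain bitmask operations. The generic sketch below uses hypothetical flag names and values, not the real pcmk_action_* constants.

/* Generic bit-flag sketch; names and values here are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum sketch_action_flags {
    sketch_action_optional   = (1 << 0),
    sketch_action_runnable   = (1 << 1),
    sketch_action_reschedule = (1 << 2),
};

static inline bool
sketch_is_set(uint64_t flags, uint64_t flag)
{
    return (flags & flag) == flag;
}

int
main(void)
{
    uint64_t flags = sketch_action_optional|sketch_action_runnable;

    flags &= ~(uint64_t) sketch_action_runnable;   /* clear a flag */
    flags |= sketch_action_reschedule;             /* set a flag */

    printf("optional=%d runnable=%d reschedule=%d\n",
           sketch_is_set(flags, sketch_action_optional),
           sketch_is_set(flags, sketch_action_runnable),
           sketch_is_set(flags, sketch_action_reschedule));
    return 0;
}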
/*!
* \internal
- * \brief Reschedule a recurring action
+ * \brief Create a recurring action marked as needing rescheduling if active
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action being rescheduled
@@ -682,16 +694,16 @@ pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
* \param[in,out] node Node where action should be rescheduled
*/
void
-pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
- guint interval_ms, pe_node_t *node)
+pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, pcmk_node_t *node)
{
- pe_action_t *op = NULL;
+ pcmk_action_t *op = NULL;
trigger_unfencing(rsc, node, "Device parameters changed (reschedule)",
NULL, rsc->cluster);
op = custom_action(rsc, pcmk__op_key(rsc->id, task, interval_ms),
- task, node, TRUE, TRUE, rsc->cluster);
- pe__set_action_flags(op, pe_action_reschedule);
+ task, node, TRUE, rsc->cluster);
+ pe__set_action_flags(op, pcmk_action_reschedule);
}
/*!
@@ -703,7 +715,7 @@ pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
* \return true if \p action has a nonzero interval, otherwise false
*/
bool
-pcmk__action_is_recurring(const pe_action_t *action)
+pcmk__action_is_recurring(const pcmk_action_t *action)
{
guint interval_ms = 0;
diff --git a/lib/pacemaker/pcmk_sched_remote.c b/lib/pacemaker/pcmk_sched_remote.c
index 6adb5d4..c915389 100644
--- a/lib/pacemaker/pcmk_sched_remote.c
+++ b/lib/pacemaker/pcmk_sched_remote.c
@@ -50,42 +50,44 @@ state2text(enum remote_connection_state state)
return "impossible";
}
-/* We always use pe_order_preserve with these convenience functions to exempt
- * internally generated constraints from the prohibition of user constraints
- * involving remote connection resources.
+/* We always use pcmk__ar_guest_allowed with these convenience functions to
+ * exempt internally generated constraints from the prohibition of user
+ * constraints involving remote connection resources.
*
- * The start ordering additionally uses pe_order_runnable_left so that the
- * specified action is not runnable if the start is not runnable.
+ * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
+ * the specified action is not runnable if the start is not runnable.
*/
static inline void
-order_start_then_action(pe_resource_t *first_rsc, pe_action_t *then_action,
- uint32_t extra, pe_working_set_t *data_set)
+order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
+ uint32_t extra)
{
- if ((first_rsc != NULL) && (then_action != NULL) && (data_set != NULL)) {
+ if ((first_rsc != NULL) && (then_action != NULL)) {
pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
then_action->rsc, NULL, then_action,
- pe_order_preserve|pe_order_runnable_left|extra,
- data_set);
+ pcmk__ar_guest_allowed
+ |pcmk__ar_unrunnable_first_blocks
+ |extra,
+ first_rsc->cluster);
}
}
static inline void
-order_action_then_stop(pe_action_t *first_action, pe_resource_t *then_rsc,
- uint32_t extra, pe_working_set_t *data_set)
+order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ uint32_t extra)
{
- if ((first_action != NULL) && (then_rsc != NULL) && (data_set != NULL)) {
+ if ((first_action != NULL) && (then_rsc != NULL)) {
pcmk__new_ordering(first_action->rsc, NULL, first_action,
then_rsc, stop_key(then_rsc), NULL,
- pe_order_preserve|extra, data_set);
+ pcmk__ar_guest_allowed|extra, then_rsc->cluster);
}
}
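
Both wrappers above unconditionally OR a base set of ordering flags into whatever the caller passes. The pattern in isolation looks like the sketch below; the names and flag values are hypothetical, not the libpacemaker API.

/* Pattern sketch only: a thin wrapper that always adds base flags to an
 * ordering. Names and values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define ORDER_GUEST_ALLOWED      (UINT32_C(1) << 0)
#define ORDER_UNRUNNABLE_BLOCKS  (UINT32_C(1) << 1)
#define ORDER_FIRST_IMPLIES_THEN (UINT32_C(1) << 2)

static void
new_ordering(const char *first, const char *then, uint32_t flags)
{
    printf("order %s -> %s (flags 0x%x)\n", first, then, (unsigned int) flags);
}

/* Every "start, then action" ordering gets the same base flags. */
static inline void
order_start_then(const char *rsc, const char *action, uint32_t extra)
{
    new_ordering(rsc, action,
                 ORDER_GUEST_ALLOWED|ORDER_UNRUNNABLE_BLOCKS|extra);
}

int
main(void)
{
    order_start_then("remote1", "rsc1_monitor_10000", ORDER_FIRST_IMPLIES_THEN);
    return 0;
}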
static enum remote_connection_state
-get_remote_node_state(const pe_node_t *node)
+get_remote_node_state(const pcmk_node_t *node)
{
- const pe_resource_t *remote_rsc = NULL;
- const pe_node_t *cluster_node = NULL;
+ const pcmk_resource_t *remote_rsc = NULL;
+ const pcmk_node_t *cluster_node = NULL;
CRM_ASSERT(node != NULL);
@@ -98,7 +100,7 @@ get_remote_node_state(const pe_node_t *node)
* is unclean or went offline, we can't process any operations
* on that remote node until after it starts elsewhere.
*/
- if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+ if ((remote_rsc->next_role == pcmk_role_stopped)
|| (remote_rsc->allocated_to == NULL)) {
// The connection resource is not going to run anywhere
@@ -110,14 +112,14 @@ get_remote_node_state(const pe_node_t *node)
return remote_state_failed;
}
- if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
+ if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
/* Connection resource is cleanly stopped */
return remote_state_stopped;
}
/* Connection resource is failed */
- if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+ if ((remote_rsc->next_role == pcmk_role_stopped)
&& remote_rsc->remote_reconnect_ms
&& node->details->remote_was_fenced
&& !pe__shutdown_requested(node)) {
@@ -164,13 +166,13 @@ get_remote_node_state(const pe_node_t *node)
* \param[in,out] action An action scheduled on a Pacemaker Remote node
*/
static void
-apply_remote_ordering(pe_action_t *action)
+apply_remote_ordering(pcmk_action_t *action)
{
- pe_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = get_remote_node_state(action->node);
- uint32_t order_opts = pe_order_none;
+ uint32_t order_opts = pcmk__ar_none;
if (action->rsc == NULL) {
return;
@@ -183,37 +185,35 @@ apply_remote_ordering(pe_action_t *action)
crm_trace("Order %s action %s relative to %s%s (state: %s)",
action->task, action->uuid,
- pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
remote_rsc->id, state2text(state));
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
- /* Migration ops map to "no_action", but we need to apply the same
- * ordering as for stop or demote (see get_router_node()).
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ /* Migration ops map to pcmk_action_unspecified, but we need to apply
+ * the same ordering as for stop or demote (see get_router_node()).
*/
- task = stop_rsc;
+ task = pcmk_action_stop;
}
switch (task) {
- case start_rsc:
- case action_promote:
- order_opts = pe_order_none;
+ case pcmk_action_start:
+ case pcmk_action_promote:
+ order_opts = pcmk__ar_none;
if (state == remote_state_failed) {
/* Force recovery, by making this action required */
- pe__set_order_flags(order_opts, pe_order_implies_then);
+ pe__set_order_flags(order_opts, pcmk__ar_first_implies_then);
}
/* Ensure connection is up before running this action */
- order_start_then_action(remote_rsc, action, order_opts,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, order_opts);
break;
- case stop_rsc:
+ case pcmk_action_stop:
if (state == remote_state_alive) {
order_action_then_stop(action, remote_rsc,
- pe_order_implies_first,
- remote_rsc->cluster);
+ pcmk__ar_then_implies_first);
} else if (state == remote_state_failed) {
/* The resource is active on the node, but since we don't have a
@@ -223,28 +223,27 @@ apply_remote_ordering(pe_action_t *action)
* by the fencing.
*/
pe_fence_node(remote_rsc->cluster, action->node,
- "resources are active but connection is unrecoverable",
+ "resources are active but "
+ "connection is unrecoverable",
FALSE);
- } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
+ } else if (remote_rsc->next_role == pcmk_role_stopped) {
/* State must be remote_state_unknown or remote_state_stopped.
* Since the connection is not coming back up in this
* transition, stop this resource first.
*/
order_action_then_stop(action, remote_rsc,
- pe_order_implies_first,
- remote_rsc->cluster);
+ pcmk__ar_then_implies_first);
} else {
/* The connection is going to be started somewhere else, so
* stop this resource after that completes.
*/
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
break;
- case action_demote:
+ case pcmk_action_demote:
/* Only order this demote relative to the connection start if the
* connection isn't being torn down. Otherwise, the demote would be
* blocked because the connection start would not be allowed.
@@ -252,8 +251,7 @@ apply_remote_ordering(pe_action_t *action)
if ((state == remote_state_resting)
|| (state == remote_state_unknown)) {
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
} /* Otherwise we can rely on the stop ordering */
break;
@@ -265,13 +263,12 @@ apply_remote_ordering(pe_action_t *action)
* the connection was re-established
*/
order_start_then_action(remote_rsc, action,
- pe_order_implies_then,
- remote_rsc->cluster);
+ pcmk__ar_first_implies_then);
} else {
- pe_node_t *cluster_node = pe__current_node(remote_rsc);
+ pcmk_node_t *cluster_node = pe__current_node(remote_rsc);
- if ((task == monitor_rsc) && (state == remote_state_failed)) {
+ if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
/* We would only be here if we do not know the state of the
* resource on the remote node. Since we have no way to find
* out, it is necessary to fence the node.
@@ -287,12 +284,10 @@ apply_remote_ordering(pe_action_t *action)
* stopped _before_ we let the connection get closed.
*/
order_action_then_stop(action, remote_rsc,
- pe_order_runnable_left,
- remote_rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks);
} else {
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
}
break;
@@ -300,7 +295,7 @@ apply_remote_ordering(pe_action_t *action)
}
static void
-apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
+apply_container_ordering(pcmk_action_t *action)
{
/* VMs are also classified as containers for these purposes... in
* that they both involve a 'thing' running on a real or remote
@@ -309,8 +304,8 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* This allows us to be smarter about the type and extent of
* recovery actions required in various scenarios
*/
- pe_resource_t *remote_rsc = NULL;
- pe_resource_t *container = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
CRM_ASSERT(action->rsc != NULL);
@@ -323,40 +318,40 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
container = remote_rsc->container;
CRM_ASSERT(container != NULL);
- if (pcmk_is_set(container->flags, pe_rsc_failed)) {
- pe_fence_node(data_set, action->node, "container failed", FALSE);
+ if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
+ pe_fence_node(action->rsc->cluster, action->node, "container failed",
+ FALSE);
}
crm_trace("Order %s action %s relative to %s%s for %s%s",
action->task, action->uuid,
- pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
remote_rsc->id,
- pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
container->id);
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
- /* Migration ops map to "no_action", but we need to apply the same
- * ordering as for stop or demote (see get_router_node()).
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ /* Migration ops map to pcmk_action_unspecified, but we need to apply
+ * the same ordering as for stop or demote (see get_router_node()).
*/
- task = stop_rsc;
+ task = pcmk_action_stop;
}
switch (task) {
- case start_rsc:
- case action_promote:
+ case pcmk_action_start:
+ case pcmk_action_promote:
// Force resource recovery if the container is recovered
- order_start_then_action(container, action, pe_order_implies_then,
- data_set);
+ order_start_then_action(container, action,
+ pcmk__ar_first_implies_then);
// Wait for the connection resource to be up, too
- order_start_then_action(remote_rsc, action, pe_order_none,
- data_set);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
break;
- case stop_rsc:
- case action_demote:
- if (pcmk_is_set(container->flags, pe_rsc_failed)) {
+ case pcmk_action_stop:
+ case pcmk_action_demote:
+ if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
/* When the container representing a guest node fails, any stop
* or demote actions for resources running on the guest node
* are implied by the container stopping. This is similar to
@@ -372,8 +367,7 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* stopped (otherwise we re-introduce an ordering loop when the
* connection is restarting).
*/
- order_action_then_stop(action, remote_rsc, pe_order_none,
- data_set);
+ order_action_then_stop(action, remote_rsc, pcmk__ar_none);
}
break;
@@ -384,13 +378,12 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
- if(task != no_action) {
+ if (task != pcmk_action_unspecified) {
order_start_then_action(remote_rsc, action,
- pe_order_implies_then, data_set);
+ pcmk__ar_first_implies_then);
}
} else {
- order_start_then_action(remote_rsc, action, pe_order_none,
- data_set);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
break;
}
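
Both apply_remote_ordering() and apply_container_ordering() remap migrate_to/migrate_from to the stop case before choosing an ordering. A stand-alone sketch of that normalization step, with simplified stand-in types:

/* Sketch of normalizing migration actions to the stop case before making
 * ordering decisions, mirroring the remapping in the hunks above.
 */
#include <stdio.h>
#include <string.h>

enum sketch_task { SKETCH_START, SKETCH_STOP, SKETCH_DEMOTE, SKETCH_OTHER };

static enum sketch_task
classify_task(const char *name)
{
    if (strcmp(name, "start") == 0) {
        return SKETCH_START;
    }
    if ((strcmp(name, "stop") == 0)
        || (strcmp(name, "migrate_to") == 0)      /* migrations are ordered */
        || (strcmp(name, "migrate_from") == 0)) { /* like stop/demote       */
        return SKETCH_STOP;
    }
    if (strcmp(name, "demote") == 0) {
        return SKETCH_DEMOTE;
    }
    return SKETCH_OTHER;
}

int
main(void)
{
    printf("%d %d\n", classify_task("migrate_to"), classify_task("stop"));
    return 0;
}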
@@ -400,20 +393,20 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* \internal
* \brief Order all relevant actions relative to remote connection actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
+pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
return;
}
crm_trace("Creating remote connection orderings");
- for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
- pe_resource_t *remote = NULL;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
+ pcmk_resource_t *remote = NULL;
// We are only interested in resource actions
if (action->rsc == NULL) {
@@ -425,16 +418,18 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* any start of the resource in this transition.
*/
if (action->rsc->is_remote_node &&
- pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
+ pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
+ pcmk__str_none)) {
pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
- pcmk__op_key(action->rsc->id, RSC_START, 0),
- NULL, pe_order_optional, data_set);
+ pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
+ 0),
+ NULL, pcmk__ar_ordered, scheduler);
continue;
}
- // We are only interested in actions allocated to a node
+ // We are only interested in actions assigned to a node
if (action->node == NULL) {
continue;
}
@@ -449,7 +444,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* real actions and vice versa later in update_actions() at the end of
* pcmk__apply_orderings().
*/
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
continue;
}
@@ -464,16 +459,17 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* remote connection. This ensures that if the connection fails to
* start, we leave the resource running on the original node.
*/
- if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
for (GList *item = action->rsc->actions; item != NULL;
item = item->next) {
- pe_action_t *rsc_action = item->data;
+ pcmk_action_t *rsc_action = item->data;
- if ((rsc_action->node->details != action->node->details)
- && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
+ if (!pe__same_node(rsc_action->node, action->node)
+ && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
pcmk__new_ordering(remote, start_key(remote), NULL,
action->rsc, NULL, rsc_action,
- pe_order_optional, data_set);
+ pcmk__ar_ordered, scheduler);
}
}
}
@@ -489,7 +485,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
*/
if (remote->container) {
crm_trace("Container ordering for %s", action->uuid);
- apply_container_ordering(action, data_set);
+ apply_container_ordering(action);
} else {
crm_trace("Remote ordering for %s", action->uuid);
@@ -507,7 +503,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* \return true if \p node is a failed remote node, false otherwise
*/
bool
-pcmk__is_failed_remote_node(const pe_node_t *node)
+pcmk__is_failed_remote_node(const pcmk_node_t *node)
{
return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
&& (get_remote_node_state(node) == remote_state_failed);
@@ -524,7 +520,8 @@ pcmk__is_failed_remote_node(const pe_node_t *node)
* resource, otherwise false
*/
bool
-pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
+pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node)
{
return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
&& (node->details->remote_rsc != NULL)
@@ -545,15 +542,15 @@ pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
* \return Connection host that action should be routed through if remote,
* otherwise NULL
*/
-pe_node_t *
-pcmk__connection_host_for_action(const pe_action_t *action)
+pcmk_node_t *
+pcmk__connection_host_for_action(const pcmk_action_t *action)
{
- pe_node_t *began_on = NULL;
- pe_node_t *ended_on = NULL;
+ pcmk_node_t *began_on = NULL;
+ pcmk_node_t *ended_on = NULL;
bool partial_migration = false;
const char *task = action->task;
- if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
|| !pe__is_guest_or_remote_node(action->node)) {
return NULL;
}
@@ -586,7 +583,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
return began_on;
}
- if (began_on->details == ended_on->details) {
+ if (pe__same_node(began_on, ended_on)) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (not moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
@@ -602,7 +599,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* on.
*/
- if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
task = g_hash_table_lookup(action->meta, "notify_operation");
}
@@ -618,8 +615,10 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* the connection's pseudo-start on the migration target, so the target is
* the router node.
*/
- if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
- "migrate_to", NULL) && !partial_migration) {
+ if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
+ PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_MIGRATE_TO, NULL)
+ && !partial_migration) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
@@ -653,7 +652,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* \param[in,out] params Resource parameters evaluated per node
*/
void
-pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
+pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
{
const char *remote_addr = g_hash_table_lookup(params,
XML_RSC_ATTR_REMOTE_RA_ADDR);
@@ -681,36 +680,37 @@ pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
* \param[in] action Action to check
*/
void
-pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action)
+pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
- const pe_node_t *host = NULL;
+ const pcmk_node_t *guest = action->node;
+ const pcmk_node_t *host = NULL;
enum action_tasks task;
- if (!pe__is_guest_node(action->node)) {
+ if (!pe__is_guest_node(guest)) {
return;
}
task = text2task(action->task);
- if ((task == action_notify) || (task == action_notified)) {
+ if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
}
switch (task) {
- case stop_rsc:
- case stopped_rsc:
- case action_demote:
- case action_demoted:
+ case pcmk_action_stop:
+ case pcmk_action_stopped:
+ case pcmk_action_demote:
+ case pcmk_action_demoted:
// "Down" actions take place on guest's current host
- host = pe__current_node(action->node->details->remote_rsc->container);
+ host = pe__current_node(guest->details->remote_rsc->container);
break;
- case start_rsc:
- case started_rsc:
- case monitor_rsc:
- case action_promote:
- case action_promoted:
+ case pcmk_action_start:
+ case pcmk_action_started:
+ case pcmk_action_monitor:
+ case pcmk_action_promote:
+ case pcmk_action_promoted:
// "Up" actions take place on guest's next host
- host = action->node->details->remote_rsc->container->allocated_to;
+ host = guest->details->remote_rsc->container->allocated_to;
break;
default:
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index b855499..908c434 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -16,8 +16,8 @@
#include "libpacemaker_private.h"
-// Resource allocation methods that vary by resource variant
-static resource_alloc_functions_t allocation_methods[] = {
+// Resource assignment methods by resource variant
+static pcmk_assignment_methods_t assignment_methods[] = {
{
pcmk__primitive_assign,
pcmk__primitive_create_actions,
@@ -58,25 +58,25 @@ static resource_alloc_functions_t allocation_methods[] = {
},
{
pcmk__clone_assign,
- clone_create_actions,
- clone_create_probe,
- clone_internal_constraints,
+ pcmk__clone_create_actions,
+ pcmk__clone_create_probe,
+ pcmk__clone_internal_constraints,
pcmk__clone_apply_coloc_score,
pcmk__colocated_resources,
pcmk__with_clone_colocations,
pcmk__clone_with_colocations,
pcmk__add_colocated_node_scores,
- clone_rsc_location,
- clone_action_flags,
+ pcmk__clone_apply_location,
+ pcmk__clone_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_resource_actions,
- clone_expand,
- clone_append_meta,
+ pcmk__clone_add_actions_to_graph,
+ pcmk__clone_add_graph_meta,
pcmk__clone_add_utilization,
pcmk__clone_shutdown_lock,
},
{
- pcmk__bundle_allocate,
+ pcmk__bundle_assign,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
@@ -85,11 +85,11 @@ static resource_alloc_functions_t allocation_methods[] = {
pcmk__with_bundle_colocations,
pcmk__bundle_with_colocations,
pcmk__add_colocated_node_scores,
- pcmk__bundle_rsc_location,
+ pcmk__bundle_apply_location,
pcmk__bundle_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_bundle_actions,
- pcmk__bundle_expand,
+ pcmk__bundle_add_actions_to_graph,
pcmk__noop_add_graph_meta,
pcmk__bundle_add_utilization,
pcmk__bundle_shutdown_lock,
@@ -108,7 +108,7 @@ static resource_alloc_functions_t allocation_methods[] = {
* \return true if agent for \p rsc changed, otherwise false
*/
bool
-pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
+pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node)
{
bool changed = false;
@@ -136,9 +136,9 @@ pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
}
if (changed && active_on_node) {
// Make sure the resource is restarted
- custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
+ custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->cluster);
- pe__set_resource_flags(rsc, pe_rsc_start_pending);
+ pe__set_resource_flags(rsc, pcmk_rsc_start_pending);
}
return changed;
}
@@ -154,14 +154,14 @@ pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
* \return (Possibly new) head of list
*/
static GList *
-add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
+add_rsc_if_matching(GList *result, pcmk_resource_t *rsc, const char *id)
{
if ((strcmp(rsc->id, id) == 0)
|| ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
result = g_list_prepend(result, rsc);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
result = add_rsc_if_matching(result, child, id);
}
@@ -172,55 +172,75 @@ add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
* \internal
* \brief Find all resources matching a given ID by either ID or clone name
*
- * \param[in] id Resource ID to check
- * \param[in] data_set Cluster working set
+ * \param[in] id Resource ID to check
+ * \param[in] scheduler Scheduler data
*
* \return List of all resources that match \p id
* \note The caller is responsible for freeing the return value with
* g_list_free().
*/
GList *
-pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set)
+pcmk__rscs_matching_id(const char *id, const pcmk_scheduler_t *scheduler)
{
GList *result = NULL;
- CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
+ CRM_CHECK((id != NULL) && (scheduler != NULL), return NULL);
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ result = add_rsc_if_matching(result, (pcmk_resource_t *) iter->data,
+ id);
}
return result;
}
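
pcmk__rscs_matching_id() relies on a recursive walk that matches either the resource ID or its clone name at every level of the resource tree. A self-contained sketch of that recursion without GLib follows; the types are simplified stand-ins.

/* Self-contained sketch of the recursive match-by-ID-or-clone-name walk;
 * simplified stand-ins, no GLib.
 */
#include <stdio.h>
#include <string.h>

struct sketch_rsc {
    const char *id;
    const char *clone_name;           /* may be NULL */
    struct sketch_rsc *children[4];   /* NULL-terminated */
};

static void
collect_matches(struct sketch_rsc *rsc, const char *id,
                struct sketch_rsc **out, int *n_out)
{
    if ((strcmp(rsc->id, id) == 0)
        || ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
        out[(*n_out)++] = rsc;
    }
    for (int i = 0; rsc->children[i] != NULL; i++) {
        collect_matches(rsc->children[i], id, out, n_out);
    }
}

int
main(void)
{
    struct sketch_rsc child = { "rsc1:0", "rsc1", { NULL } };
    struct sketch_rsc clone = { "rsc1-clone", NULL, { &child, NULL } };
    struct sketch_rsc *matches[8];
    int n = 0;

    collect_matches(&clone, "rsc1", matches, &n);
    printf("matched %d resource(s)\n", n);   /* the child matches by clone name */
    return 0;
}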
/*!
* \internal
- * \brief Set the variant-appropriate allocation methods for a resource
+ * \brief Set the variant-appropriate assignment methods for a resource
*
- * \param[in,out] rsc Resource to set allocation methods for
- * \param[in] ignored Here so function can be used with g_list_foreach()
+ * \param[in,out] data Resource to set assignment methods for
+ * \param[in] user_data Ignored
*/
static void
-set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored)
+set_assignment_methods_for_rsc(gpointer data, gpointer user_data)
{
- rsc->cmds = &allocation_methods[rsc->variant];
- g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL);
+ pcmk_resource_t *rsc = data;
+
+ rsc->cmds = &assignment_methods[rsc->variant];
+ g_list_foreach(rsc->children, set_assignment_methods_for_rsc, NULL);
}
/*!
* \internal
- * \brief Set the variant-appropriate allocation methods for all resources
+ * \brief Set the variant-appropriate assignment methods for all resources
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__set_allocation_methods(pe_working_set_t *data_set)
+pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler)
+{
+ g_list_foreach(scheduler->resources, set_assignment_methods_for_rsc, NULL);
+}
+
+/*!
+ * \internal
+ * \brief Wrapper for colocated_resources() method for readability
+ *
+ * \param[in] rsc Resource to add to colocated list
+ * \param[in] orig_rsc Resource originally requested
+ * \param[in,out] list Pointer to list to add to
+ *
+ * \note Updates \p list in place; the stored list head may change.
+ */
+static inline void
+add_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc,
- NULL);
+ *list = rsc->cmds->colocated_resources(rsc, orig_rsc, *list);
}
-// Shared implementation of resource_alloc_functions_t:colocated_resources()
+// Shared implementation of pcmk_assignment_methods_t:colocated_resources()
GList *
-pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc,
+pcmk__colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
const GList *iter = NULL;
@@ -242,7 +262,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
colocations = pcmk__this_with_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
- const pe_resource_t *primary = constraint->primary;
+ const pcmk_resource_t *primary = constraint->primary;
if (primary == orig_rsc) {
continue; // Break colocation loop
@@ -251,10 +271,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(rsc, primary, constraint,
true) == pcmk__coloc_affects_location)) {
-
- colocated_rscs = primary->cmds->colocated_resources(primary,
- orig_rsc,
- colocated_rscs);
+ add_colocated_resources(primary, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
@@ -263,7 +280,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
colocations = pcmk__with_this_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
- const pe_resource_t *dependent = constraint->dependent;
+ const pcmk_resource_t *dependent = constraint->dependent;
if (dependent == orig_rsc) {
continue; // Break colocation loop
@@ -276,10 +293,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(dependent, rsc, constraint,
true) == pcmk__coloc_affects_location)) {
-
- colocated_rscs = dependent->cmds->colocated_resources(dependent,
- orig_rsc,
- colocated_rscs);
+ add_colocated_resources(dependent, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
@@ -289,21 +303,29 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
// No-op function for variants that don't need to implement add_graph_meta()
void
-pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
}
+/*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a resource
+ *
+ * \param[in,out] rsc Resource to output actions for
+ */
void
-pcmk__output_resource_actions(pe_resource_t *rsc)
+pcmk__output_resource_actions(pcmk_resource_t *rsc)
{
- pcmk__output_t *out = rsc->cluster->priv;
+ pcmk_node_t *next = NULL;
+ pcmk_node_t *current = NULL;
+ pcmk__output_t *out = NULL;
- pe_node_t *next = NULL;
- pe_node_t *current = NULL;
+ CRM_ASSERT(rsc != NULL);
+ out = rsc->cluster->priv;
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
child->cmds->output_actions(child);
}
@@ -313,15 +335,15 @@ pcmk__output_resource_actions(pe_resource_t *rsc)
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
- if (rsc->role == RSC_ROLE_STOPPED) {
+ if (rsc->role == pcmk_role_stopped) {
/* This can occur when resources are being recovered because
* the current role can change in pcmk__primitive_create_actions()
*/
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
- if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((current == NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
/* Don't log stopped orphans */
return;
}
@@ -331,175 +353,207 @@ pcmk__output_resource_actions(pe_resource_t *rsc)
/*!
* \internal
- * \brief Assign a specified primitive resource to a node
+ * \brief Add a resource to a node's list of assigned resources
+ *
+ * \param[in,out] node Node to add resource to
+ * \param[in] rsc Resource to add
+ */
+static inline void
+add_assigned_resource(pcmk_node_t *node, pcmk_resource_t *rsc)
+{
+ node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc,
+ rsc);
+}
+
+/*!
+ * \internal
+ * \brief Assign a specified resource (of any variant) to a node
+ *
+ * Assign a specified resource and its children (if any) to a specified node, if
+ * the node can run the resource (or unconditionally, if \p force is true). Mark
+ * the resources as no longer provisional.
*
- * Assign a specified primitive resource to a specified node, if the node can
- * run the resource (or unconditionally, if \p force is true). Mark the resource
- * as no longer provisional. If the primitive can't be assigned (or \p chosen is
- * NULL), unassign any previous assignment for it, set its next role to stopped,
- * and update any existing actions scheduled for it. This is not done
- * recursively for children, so it should be called only for primitives.
+ * If a resource can't be assigned (or \p node is \c NULL), unassign any
+ * previous assignment. If \p stop_if_fail is \c true, set next role to stopped
+ * and update any existing actions scheduled for the resource.
*
- * \param[in,out] rsc Resource to assign
- * \param[in,out] chosen Node to assign \p rsc to
- * \param[in] force If true, assign to \p chosen even if unavailable
+ * \param[in,out] rsc Resource to assign
+ * \param[in,out] node Node to assign \p rsc to
+ * \param[in] force If true, assign to \p node even if unavailable
+ * \param[in] stop_if_fail If \c true and either \p rsc can't be assigned
+ * or \p node is \c NULL, set next role to
+ * stopped and update existing actions (if \p rsc
+ * is not a primitive, this applies to its
+ * primitive descendants instead)
*
- * \return true if \p rsc could be assigned, otherwise false
+ * \return \c true if the assignment of \p rsc changed, or \c false otherwise
*
* \note Assigning a resource to the NULL node using this function is different
- * from calling pcmk__unassign_resource(), in that it will also update any
+ * from calling pcmk__unassign_resource(), in that it may also update any
* actions created for the resource.
+ * \note The \c pcmk_assignment_methods_t:assign() method is preferred, unless
+ * a resource should be assigned to the \c NULL node or every resource in
+ * a tree should be assigned to the same node.
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
bool
-pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force)
+pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
+ bool stop_if_fail)
{
- pcmk__output_t *out = rsc->cluster->priv;
-
- CRM_ASSERT(rsc->variant == pe_native);
-
- if (!force && (chosen != NULL)) {
- if ((chosen->weight < 0)
- // Allow the graph to assume that guest node connections will come up
- || (!pcmk__node_available(chosen, true, false)
- && !pe__is_guest_node(chosen))) {
-
- crm_debug("All nodes for resource %s are unavailable, unclean or "
- "shutting down (%s can%s run resources, with weight %d)",
- rsc->id, pe__node_name(chosen),
- (pcmk__node_available(chosen, true, false)? "" : "not"),
- chosen->weight);
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
- chosen = NULL;
+ bool changed = false;
+
+ CRM_ASSERT(rsc != NULL);
+
+ if (rsc->children != NULL) {
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = iter->data;
+
+ changed |= pcmk__assign_resource(child_rsc, node, force,
+ stop_if_fail);
}
+ return changed;
}
+ // Assigning a primitive
+
+ if (!force && (node != NULL)
+ && ((node->weight < 0)
+ // Allow graph to assume that guest node connections will come up
+ || (!pcmk__node_available(node, true, false)
+ && !pe__is_guest_node(node)))) {
+
+ pe_rsc_debug(rsc,
+ "All nodes for resource %s are unavailable, unclean or "
+ "shutting down (%s can%s run resources, with score %s)",
+ rsc->id, pe__node_name(node),
+ (pcmk__node_available(node, true, false)? "" : "not"),
+ pcmk_readable_score(node->weight));
+
+ if (stop_if_fail) {
+ pe__set_next_role(rsc, pcmk_role_stopped, "node availability");
+ }
+ node = NULL;
+ }
+
+ if (rsc->allocated_to != NULL) {
+ changed = !pe__same_node(rsc->allocated_to, node);
+ } else {
+ changed = (node != NULL);
+ }
pcmk__unassign_resource(rsc);
- pe__clear_resource_flags(rsc, pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned);
- if (chosen == NULL) {
- crm_debug("Could not allocate a node for %s", rsc->id);
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
+ if (node == NULL) {
+ char *rc_stopped = NULL;
+
+ pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id);
+
+ if (!stop_if_fail) {
+ return changed;
+ }
+ pe__set_next_role(rsc, pcmk_role_stopped, "unable to assign");
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *op = (pe_action_t *) iter->data;
+ pcmk_action_t *op = (pcmk_action_t *) iter->data;
- crm_debug("Updating %s for allocation failure", op->uuid);
+ pe_rsc_debug(rsc, "Updating %s for %s assignment failure",
+ op->uuid, rsc->id);
- if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) {
- pe__clear_action_flags(op, pe_action_optional);
+ if (pcmk__str_eq(op->task, PCMK_ACTION_STOP, pcmk__str_none)) {
+ pe__clear_action_flags(op, pcmk_action_optional);
- } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) {
- pe__clear_action_flags(op, pe_action_runnable);
- //pe__set_resource_flags(rsc, pe_rsc_block);
+ } else if (pcmk__str_eq(op->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
+ pe__clear_action_flags(op, pcmk_action_runnable);
} else {
// Cancel recurring actions, unless for stopped state
const char *interval_ms_s = NULL;
const char *target_rc_s = NULL;
- char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
interval_ms_s = g_hash_table_lookup(op->meta,
XML_LRM_ATTR_INTERVAL_MS);
target_rc_s = g_hash_table_lookup(op->meta,
XML_ATTR_TE_TARGET_RC);
- if ((interval_ms_s != NULL)
- && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none)
+ if (rc_stopped == NULL) {
+ rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
+ }
+
+ if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
&& !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
- pe__clear_action_flags(op, pe_action_runnable);
+
+ pe__clear_action_flags(op, pcmk_action_runnable);
}
- free(rc_stopped);
}
}
- return false;
- }
-
- crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen));
- rsc->allocated_to = pe__copy_node(chosen);
-
- chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc,
- rsc);
- chosen->details->num_resources++;
- chosen->count++;
- pcmk__consume_node_capacity(chosen->details->utilization, rsc);
-
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
- out->message(out, "resource-util", rsc, chosen, __func__);
+ free(rc_stopped);
+ return changed;
}
- return true;
-}
-/*!
- * \internal
- * \brief Assign a specified resource (of any variant) to a node
- *
- * Assign a specified resource and its children (if any) to a specified node, if
- * the node can run the resource (or unconditionally, if \p force is true). Mark
- * the resources as no longer provisional. If the resources can't be assigned
- * (or \p chosen is NULL), unassign any previous assignments, set next role to
- * stopped, and update any existing actions scheduled for them.
- *
- * \param[in,out] rsc Resource to assign
- * \param[in,out] chosen Node to assign \p rsc to
- * \param[in] force If true, assign to \p chosen even if unavailable
- *
- * \return true if \p rsc could be assigned, otherwise false
- *
- * \note Assigning a resource to the NULL node using this function is different
- * from calling pcmk__unassign_resource(), in that it will also update any
- * actions created for the resource.
- */
-bool
-pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
-{
- bool changed = false;
+ pe_rsc_debug(rsc, "Assigning %s to %s", rsc->id, pe__node_name(node));
+ rsc->allocated_to = pe__copy_node(node);
- if (rsc->children == NULL) {
- if (rsc->allocated_to != NULL) {
- changed = true;
- }
- pcmk__finalize_assignment(rsc, node, force);
+ add_assigned_resource(node, rsc);
+ node->details->num_resources++;
+ node->count++;
+ pcmk__consume_node_capacity(node->details->utilization, rsc);
- } else {
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_show_utilization)) {
+ pcmk__output_t *out = rsc->cluster->priv;
- changed |= pcmk__assign_resource(child_rsc, node, force);
- }
+ out->message(out, "resource-util", rsc, node, __func__);
}
return changed;
}
/*!
* \internal
- * \brief Remove any assignment of a specified resource to a node
+ * \brief Remove any node assignment from a specified resource and its children
*
* If a specified resource has been assigned to a node, remove that assignment
- * and mark the resource as provisional again. This is not done recursively for
- * children, so it should be called only for primitives.
+ * and mark the resource as provisional again.
*
* \param[in,out] rsc Resource to unassign
+ *
+ * \note This function is called recursively on \p rsc and its children.
*/
void
-pcmk__unassign_resource(pe_resource_t *rsc)
+pcmk__unassign_resource(pcmk_resource_t *rsc)
{
- pe_node_t *old = rsc->allocated_to;
+ pcmk_node_t *old = rsc->allocated_to;
if (old == NULL) {
- return;
+ crm_info("Unassigning %s", rsc->id);
+ } else {
+ crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
}
- crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
- pe__set_resource_flags(rsc, pe_rsc_provisional);
- rsc->allocated_to = NULL;
+ pe__set_resource_flags(rsc, pcmk_rsc_unassigned);
- /* We're going to free the pe_node_t, but its details member is shared and
- * will remain, so update that appropriately first.
- */
- old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
- rsc);
- old->details->num_resources--;
- pcmk__release_node_capacity(old->details->utilization, rsc);
- free(old);
+ if (rsc->children == NULL) {
+ if (old == NULL) {
+ return;
+ }
+ rsc->allocated_to = NULL;
+
+ /* We're going to free the pcmk_node_t, but its details member is shared
+ * and will remain, so update that appropriately first.
+ */
+ old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
+ rsc);
+ old->details->num_resources--;
+ pcmk__release_node_capacity(old->details->utilization, rsc);
+ free(old);
+ return;
+ }
+
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__unassign_resource((pcmk_resource_t *) iter->data);
+ }
}
/*!
@@ -514,11 +568,11 @@ pcmk__unassign_resource(pe_resource_t *rsc)
* \return true if the migration threshold has been reached, false otherwise
*/
bool
-pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
- pe_resource_t **failed)
+pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_resource_t **failed)
{
int fail_count, remaining_tries;
- pe_resource_t *rsc_to_ban = rsc;
+ pcmk_resource_t *rsc_to_ban = rsc;
// Migration threshold of 0 means never force away
if (rsc->migration_threshold == 0) {
@@ -526,19 +580,19 @@ pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
}
// If we're ignoring failures, also ignore the migration threshold
- if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
return false;
}
// If there are no failures, there's no need to force away
fail_count = pe_get_failcount(node, rsc, NULL,
- pe_fc_effective|pe_fc_fillers, NULL);
+ pcmk__fc_effective|pcmk__fc_fillers, NULL);
if (fail_count <= 0) {
return false;
}
// If failed resource is anonymous clone instance, we'll force clone away
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
rsc_to_ban = uber_parent(rsc);
}
@@ -564,69 +618,66 @@ pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
return false;
}
-static void *
-convert_const_pointer(const void *ptr)
-{
- /* Worst function ever */
- return (void *)ptr;
-}
-
/*!
* \internal
- * \brief Get a node's weight
+ * \brief Get a node's score
*
- * \param[in] node Unweighted node to check (for node ID)
- * \param[in] nodes List of weighted nodes to look for \p node in
+ * \param[in] node Node to look up (by ID)
+ * \param[in] nodes Hash table of nodes in which to look up \p node's score
*
- * \return Node's weight, or -INFINITY if not found
+ * \return Node's score, or -INFINITY if not found
*/
static int
-get_node_weight(const pe_node_t *node, GHashTable *nodes)
+get_node_score(const pcmk_node_t *node, GHashTable *nodes)
{
- pe_node_t *weighted_node = NULL;
+ pcmk_node_t *found_node = NULL;
if ((node != NULL) && (nodes != NULL)) {
- weighted_node = g_hash_table_lookup(nodes, node->details->id);
+ found_node = g_hash_table_lookup(nodes, node->details->id);
}
- return (weighted_node == NULL)? -INFINITY : weighted_node->weight;
+ return (found_node == NULL)? -INFINITY : found_node->weight;
}
/*!
* \internal
- * \brief Compare two resources according to which should be allocated first
+ * \brief Compare two resources according to which should be assigned first
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data Sorted list of all nodes in cluster
*
- * \return -1 if \p a should be allocated before \b, 0 if they are equal,
- * or +1 if \p a should be allocated after \b
+ * \return -1 if \p a should be assigned before \p b, 0 if they are equal,
+ * or +1 if \p a should be assigned after \p b
*/
static gint
cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
{
- const pe_resource_t *resource1 = a;
- const pe_resource_t *resource2 = b;
- const GList *nodes = (const GList *) data;
+ /* GLib insists that this function require gconstpointer arguments, but we
+ * make a small, temporary change to each argument (setting the
+ * pe_rsc_merging flag) during comparison
+ */
+ pcmk_resource_t *resource1 = (pcmk_resource_t *) a;
+ pcmk_resource_t *resource2 = (pcmk_resource_t *) b;
+ const GList *nodes = data;
int rc = 0;
- int r1_weight = -INFINITY;
- int r2_weight = -INFINITY;
- pe_node_t *r1_node = NULL;
- pe_node_t *r2_node = NULL;
+ int r1_score = -INFINITY;
+ int r2_score = -INFINITY;
+ pcmk_node_t *r1_node = NULL;
+ pcmk_node_t *r2_node = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
const char *reason = NULL;
- // Resources with highest priority should be allocated first
+ // Resources with highest priority should be assigned first
reason = "priority";
- r1_weight = resource1->priority;
- r2_weight = resource2->priority;
- if (r1_weight > r2_weight) {
+ r1_score = resource1->priority;
+ r2_score = resource2->priority;
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
@@ -637,17 +688,17 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
goto done;
}
- // Calculate and log node weights
- resource1->cmds->add_colocated_node_scores(convert_const_pointer(resource1),
- resource1->id, &r1_nodes, NULL,
- 1, pcmk__coloc_select_this_with);
- resource2->cmds->add_colocated_node_scores(convert_const_pointer(resource2),
- resource2->id, &r2_nodes, NULL,
- 1, pcmk__coloc_select_this_with);
- pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
- resource1->cluster);
- pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
- resource2->cluster);
+ // Calculate and log node scores
+ resource1->cmds->add_colocated_node_scores(resource1, NULL, resource1->id,
+ &r1_nodes, NULL, 1,
+ pcmk__coloc_select_this_with);
+ resource2->cmds->add_colocated_node_scores(resource2, NULL, resource2->id,
+ &r2_nodes, NULL, 1,
+ pcmk__coloc_select_this_with);
+ pe__show_node_scores(true, NULL, resource1->id, r1_nodes,
+ resource1->cluster);
+ pe__show_node_scores(true, NULL, resource2->id, r2_nodes,
+ resource2->cluster);
// The resource with highest score on its current node goes first
reason = "current location";
@@ -657,29 +708,29 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
if (resource2->running_on != NULL) {
r2_node = pe__current_node(resource2);
}
- r1_weight = get_node_weight(r1_node, r1_nodes);
- r2_weight = get_node_weight(r2_node, r2_nodes);
- if (r1_weight > r2_weight) {
+ r1_score = get_node_score(r1_node, r1_nodes);
+ r2_score = get_node_score(r2_node, r2_nodes);
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
- // Otherwise a higher weight on any node will do
+ // Otherwise a higher score on any node will do
reason = "score";
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
- r1_weight = get_node_weight(node, r1_nodes);
- r2_weight = get_node_weight(node, r2_nodes);
- if (r1_weight > r2_weight) {
+ r1_score = get_node_score(node, r1_nodes);
+ r2_score = get_node_score(node, r2_nodes);
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
@@ -687,11 +738,11 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
done:
crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
- resource1->id, r1_weight,
+ resource1->id, r1_score,
((r1_node == NULL)? "" : " on "),
((r1_node == NULL)? "" : r1_node->details->id),
((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
- resource2->id, r2_weight,
+ resource2->id, r2_score,
((r2_node == NULL)? "" : " on "),
((r2_node == NULL)? "" : r2_node->details->id),
reason);
@@ -706,17 +757,17 @@ done:
/*!
* \internal
- * \brief Sort resources in the order they should be allocated to nodes
+ * \brief Sort resources in the order they should be assigned to nodes
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__sort_resources(pe_working_set_t *data_set)
+pcmk__sort_resources(pcmk_scheduler_t *scheduler)
{
- GList *nodes = g_list_copy(data_set->nodes);
+ GList *nodes = g_list_copy(scheduler->nodes);
nodes = pcmk__sort_nodes(nodes, NULL);
- data_set->resources = g_list_sort_with_data(data_set->resources,
- cmp_resources, nodes);
+ scheduler->resources = g_list_sort_with_data(scheduler->resources,
+ cmp_resources, nodes);
g_list_free(nodes);
}
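Editor's note (not part of the upstream patch): the hunks above fold pcmk__finalize_assignment() into pcmk__assign_resource() and add a stop_if_fail parameter. The C sketch below illustrates the two modes described in the new doxygen block; the wrapper name and the tentative-then-final flow are illustrative assumptions, while the pcmk__assign_resource() and pcmk__unassign_resource() signatures match those introduced above (and it assumes the same libpacemaker_private.h context as the file being patched).

/* Editor's sketch only: exercise both stop_if_fail modes of the new API. */
static void
probe_then_finalize_assignment(pcmk_resource_t *rsc, pcmk_node_t *node)
{
    /* Tentative pass: with stop_if_fail=false, a failed (or NULL-node)
     * assignment has no side effects on roles or actions, and a successful
     * one can be completely undone.
     */
    bool changed = pcmk__assign_resource(rsc, node, false, false);

    crm_trace("Tentative assignment of %s %s placement", rsc->id,
              (changed? "changed" : "kept"));
    pcmk__unassign_resource(rsc);   // roll the tentative assignment back

    /* Final pass: with stop_if_fail=true, failure sets the next role to
     * stopped and updates existing actions, matching the behaviour of the
     * removed pcmk__finalize_assignment().
     */
    pcmk__assign_resource(rsc, node, false, true);
}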
diff --git a/lib/pacemaker/pcmk_sched_tickets.c b/lib/pacemaker/pcmk_sched_tickets.c
index 30206d7..f61b371 100644
--- a/lib/pacemaker/pcmk_sched_tickets.c
+++ b/lib/pacemaker/pcmk_sched_tickets.c
@@ -13,6 +13,7 @@
#include <glib.h>
#include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -27,8 +28,8 @@ enum loss_ticket_policy {
typedef struct {
const char *id;
- pe_resource_t *rsc;
- pe_ticket_t *ticket;
+ pcmk_resource_t *rsc;
+ pcmk_ticket_t *ticket;
enum loss_ticket_policy loss_policy;
int role;
} rsc_ticket_t;
@@ -43,9 +44,9 @@ typedef struct {
* constraint's, otherwise false
*/
static bool
-ticket_role_matches(const pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
+ticket_role_matches(const pcmk_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
{
- if ((rsc_ticket->role == RSC_ROLE_UNKNOWN)
+ if ((rsc_ticket->role == pcmk_role_unknown)
|| (rsc_ticket->role == rsc->role)) {
return true;
}
@@ -59,13 +60,11 @@ ticket_role_matches(const pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
*
* \param[in,out] rsc Resource affected by ticket
* \param[in] rsc_ticket Ticket
- * \param[in,out] data_set Cluster working set
*/
static void
-constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
- pe_working_set_t *data_set)
+constraints_for_ticket(pcmk_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
CRM_CHECK((rsc != NULL) && (rsc_ticket != NULL), return);
@@ -75,9 +74,8 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
if (rsc->children) {
pe_rsc_trace(rsc, "Processing ticket dependencies from %s", rsc->id);
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- constraints_for_ticket((pe_resource_t *) gIter->data, rsc_ticket,
- data_set);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ constraints_for_ticket((pcmk_resource_t *) iter->data, rsc_ticket);
}
return;
}
@@ -91,14 +89,14 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
- data_set);
+ rsc->cluster);
break;
case loss_ticket_demote:
// Promotion score will be set to -INFINITY in promotion_order()
- if (rsc_ticket->role != RSC_ROLE_PROMOTED) {
+ if (rsc_ticket->role != pcmk_role_promoted) {
resource_location(rsc, NULL, -INFINITY,
- "__loss_of_ticket__", data_set);
+ "__loss_of_ticket__", rsc->cluster);
}
break;
@@ -108,11 +106,10 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
}
resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
- data_set);
+ rsc->cluster);
- for (gIter = rsc->running_on; gIter != NULL;
- gIter = gIter->next) {
- pe_fence_node(data_set, (pe_node_t *) gIter->data,
+ for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pe_fence_node(rsc->cluster, (pcmk_node_t *) iter->data,
"deadman ticket was lost", FALSE);
}
break;
@@ -122,34 +119,33 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
return;
}
if (rsc->running_on != NULL) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
}
break;
}
} else if (!rsc_ticket->ticket->granted) {
- if ((rsc_ticket->role != RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role != pcmk_role_promoted)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc, NULL, -INFINITY, "__no_ticket__",
- data_set);
+ rsc->cluster);
}
} else if (rsc_ticket->ticket->standby) {
- if ((rsc_ticket->role != RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role != pcmk_role_promoted)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc, NULL, -INFINITY, "__ticket_standby__",
- data_set);
+ rsc->cluster);
}
}
}
static void
-rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
- const char *state, const char *loss_policy,
- pe_working_set_t *data_set)
+rsc_ticket_new(const char *id, pcmk_resource_t *rsc, pcmk_ticket_t *ticket,
+ const char *state, const char *loss_policy)
{
rsc_ticket_t *new_rsc_ticket = NULL;
@@ -164,9 +160,9 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
return;
}
- if (pcmk__str_eq(state, RSC_ROLE_STARTED_S,
+ if (pcmk__str_eq(state, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- state = RSC_ROLE_UNKNOWN_S;
+ state = PCMK__ROLE_UNKNOWN;
}
new_rsc_ticket->id = id;
@@ -175,7 +171,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
new_rsc_ticket->role = text2role(state);
if (pcmk__str_eq(loss_policy, "fence", pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
new_rsc_ticket->loss_policy = loss_ticket_fence;
} else {
pcmk__config_err("Resetting '" XML_TICKET_ATTR_LOSS_POLICY
@@ -196,7 +192,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
role2text(new_rsc_ticket->role));
new_rsc_ticket->loss_policy = loss_ticket_freeze;
- } else if (pcmk__str_eq(loss_policy, "demote", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(loss_policy, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
crm_debug("On loss of ticket '%s': Demote %s (%s)",
new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
role2text(new_rsc_ticket->role));
@@ -209,7 +205,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
new_rsc_ticket->loss_policy = loss_ticket_stop;
} else {
- if (new_rsc_ticket->role == RSC_ROLE_PROMOTED) {
+ if (new_rsc_ticket->role == pcmk_role_promoted) {
crm_debug("On loss of ticket '%s': Default to demote %s (%s)",
new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
role2text(new_rsc_ticket->role));
@@ -228,18 +224,18 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
rsc->rsc_tickets = g_list_append(rsc->rsc_tickets, new_rsc_ticket);
- data_set->ticket_constraints = g_list_append(data_set->ticket_constraints,
- new_rsc_ticket);
+ rsc->cluster->ticket_constraints = g_list_append(
+ rsc->cluster->ticket_constraints, new_rsc_ticket);
if (!(new_rsc_ticket->ticket->granted) || new_rsc_ticket->ticket->standby) {
- constraints_for_ticket(rsc, new_rsc_ticket, data_set);
+ constraints_for_ticket(rsc, new_rsc_ticket);
}
}
// \return Standard Pacemaker return code
static int
-unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
- const char *loss_policy, pe_working_set_t *data_set)
+unpack_rsc_ticket_set(xmlNode *set, pcmk_ticket_t *ticket,
+ const char *loss_policy, pcmk_scheduler_t *scheduler)
{
const char *set_id = NULL;
const char *role = NULL;
@@ -259,9 +255,9 @@ unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
for (xmlNode *xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *resource = NULL;
- resource = pcmk__find_constraint_resource(data_set->resources,
+ resource = pcmk__find_constraint_resource(scheduler->resources,
ID(xml_rsc));
if (resource == NULL) {
pcmk__config_err("%s: No resource found for %s",
@@ -270,21 +266,21 @@ unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
}
pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'",
resource->id, ticket->id);
- rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set);
+ rsc_ticket_new(set_id, resource, ticket, role, loss_policy);
}
return pcmk_rc_ok;
}
static void
-unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
const char *loss_policy = crm_element_value(xml_obj,
XML_TICKET_ATTR_LOSS_POLICY);
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
const char *rsc_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
const char *state = crm_element_value(xml_obj,
@@ -294,10 +290,10 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
const char *instance = crm_element_value(xml_obj,
XML_COLOC_ATTR_SOURCE_INSTANCE);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if (instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
@@ -307,7 +303,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
@@ -316,7 +312,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id);
return;
} else {
- ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_str);
}
if (ticket == NULL) {
@@ -329,7 +325,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
pcmk__config_err("Ignoring constraint '%s' without resource", id);
return;
} else {
- rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, rsc_id);
}
if (rsc == NULL) {
@@ -354,20 +350,20 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
}
}
- rsc_ticket_new(id, rsc, ticket, state, loss_policy, data_set);
+ rsc_ticket_new(id, rsc, ticket, state, loss_policy);
}
// \return Standard Pacemaker return code
static int
unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *rsc_id = NULL;
const char *state = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
xmlNode *rsc_set = NULL;
@@ -378,12 +374,12 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_ticket");
return pcmk_rc_ok;
@@ -394,7 +390,7 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, rsc_id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, rsc_id);
return pcmk_rc_unpack_error;
@@ -408,9 +404,9 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under rsc_ticket
+ // Convert any template or tag reference in "rsc" into ticket resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_COLOC_ATTR_SOURCE,
- false, data_set)) {
+ false, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -432,16 +428,15 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
}
void
-pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
bool any_sets = false;
const char *id = NULL;
- const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
- const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
+ const char *ticket_str = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
@@ -451,30 +446,31 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
+ ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
if (ticket_str == NULL) {
pcmk__config_err("Ignoring constraint '%s' without ticket", id);
return;
} else {
- ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_str);
}
if (ticket == NULL) {
- ticket = ticket_new(ticket_str, data_set);
+ ticket = ticket_new(ticket_str, scheduler);
if (ticket == NULL) {
return;
}
}
if (unpack_rsc_ticket_tags(xml_obj, &expanded_xml,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
@@ -485,11 +481,15 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
set = crm_next_same_xml(set)) {
+ const char *loss_policy = NULL;
+
any_sets = true;
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
+ loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
+
if ((set == NULL) // Configuration error, message already logged
|| (unpack_rsc_ticket_set(set, ticket, loss_policy,
- data_set) != pcmk_rc_ok)) {
+ scheduler) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
@@ -503,7 +503,7 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (!any_sets) {
- unpack_simple_rsc_ticket(xml_obj, data_set);
+ unpack_simple_rsc_ticket(xml_obj, scheduler);
}
}
@@ -517,12 +517,12 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in,out] rsc Resource to check
*/
void
-pcmk__require_promotion_tickets(pe_resource_t *rsc)
+pcmk__require_promotion_tickets(pcmk_resource_t *rsc)
{
for (GList *item = rsc->rsc_tickets; item != NULL; item = item->next) {
rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) item->data;
- if ((rsc_ticket->role == RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role == pcmk_role_promoted)
&& (!rsc_ticket->ticket->granted || rsc_ticket->ticket->standby)) {
resource_location(rsc, NULL, -INFINITY,
"__stateful_without_ticket__", rsc->cluster);
diff --git a/lib/pacemaker/pcmk_sched_utilization.c b/lib/pacemaker/pcmk_sched_utilization.c
index 0a4bec3..962a94c 100644
--- a/lib/pacemaker/pcmk_sched_utilization.c
+++ b/lib/pacemaker/pcmk_sched_utilization.c
@@ -13,9 +13,6 @@
#include "libpacemaker_private.h"
-// Name for a pseudo-op to use in ordering constraints for utilization
-#define LOAD_STOPPED "load_stopped"
-
/*!
* \internal
* \brief Get integer utilization from a string
@@ -46,8 +43,8 @@ utilization_value(const char *s)
*/
struct compare_data {
- const pe_node_t *node1;
- const pe_node_t *node2;
+ const pcmk_node_t *node1;
+ const pcmk_node_t *node2;
bool node2_only;
int result;
};
@@ -56,8 +53,8 @@ struct compare_data {
* \internal
* \brief Compare a single utilization attribute for two nodes
*
- * Compare one utilization attribute for two nodes, incrementing the result if
- * the first node has greater capacity, and decrementing it if the second node
+ * Compare one utilization attribute for two nodes, decrementing the result if
+ * the first node has greater capacity, and incrementing it if the second node
* has greater capacity.
*
* \param[in] key Utilization attribute name to compare
@@ -102,7 +99,8 @@ compare_utilization_value(gpointer key, gpointer value, gpointer user_data)
* if node2 has more free capacity
*/
int
-pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2)
+pcmk__compare_node_capacities(const pcmk_node_t *node1,
+ const pcmk_node_t *node2)
{
struct compare_data data = {
.node1 = node1,
@@ -167,7 +165,7 @@ update_utilization_value(gpointer key, gpointer value, gpointer user_data)
*/
void
pcmk__consume_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
@@ -186,7 +184,7 @@ pcmk__consume_node_capacity(GHashTable *current_utilization,
*/
void
pcmk__release_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
@@ -202,7 +200,7 @@ pcmk__release_node_capacity(GHashTable *current_utilization,
*/
struct capacity_data {
- const pe_node_t *node;
+ const pcmk_node_t *node;
const char *rsc_id;
bool is_enough;
};
@@ -248,7 +246,7 @@ check_capacity(gpointer key, gpointer value, gpointer user_data)
* \return true if node has sufficient capacity for resource, otherwise false
*/
static bool
-have_enough_capacity(const pe_node_t *node, const char *rsc_id,
+have_enough_capacity(const pcmk_node_t *node, const char *rsc_id,
GHashTable *utilization)
{
struct capacity_data data = {
@@ -265,7 +263,7 @@ have_enough_capacity(const pe_node_t *node, const char *rsc_id,
* \internal
* \brief Sum the utilization requirements of a list of resources
*
- * \param[in] orig_rsc Resource being allocated (for logging purposes)
+ * \param[in] orig_rsc Resource being assigned (for logging purposes)
* \param[in] rscs Resources whose utilization should be summed
*
* \return Newly allocated hash table with sum of all utilization values
@@ -273,12 +271,12 @@ have_enough_capacity(const pe_node_t *node, const char *rsc_id,
* g_hash_table_destroy().
*/
static GHashTable *
-sum_resource_utilization(const pe_resource_t *orig_rsc, GList *rscs)
+sum_resource_utilization(const pcmk_resource_t *orig_rsc, GList *rscs)
{
GHashTable *utilization = pcmk__strkey_table(free, free);
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->add_utilization(rsc, orig_rsc, rscs, utilization);
}
@@ -294,15 +292,15 @@ sum_resource_utilization(const pe_resource_t *orig_rsc, GList *rscs)
* \return Allowed node for \p rsc with most spare capacity, if there are no
* nodes with enough capacity for \p rsc and all its colocated resources
*/
-const pe_node_t *
-pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
+const pcmk_node_t *
+pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc)
{
bool any_capable = false;
char *rscs_id = NULL;
- pe_node_t *node = NULL;
- const pe_node_t *most_capable_node = NULL;
+ pcmk_node_t *node = NULL;
+ const pcmk_node_t *most_capable_node = NULL;
GList *colocated_rscs = NULL;
- GHashTable *unallocated_utilization = NULL;
+ GHashTable *unassigned_utilization = NULL;
GHashTableIter iter;
CRM_CHECK(rsc != NULL, return NULL);
@@ -326,8 +324,8 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
colocated_rscs = g_list_append(colocated_rscs, rsc);
}
- // Sum utilization of colocated resources that haven't been allocated yet
- unallocated_utilization = sum_resource_utilization(rsc, colocated_rscs);
+ // Sum utilization of colocated resources that haven't been assigned yet
+ unassigned_utilization = sum_resource_utilization(rsc, colocated_rscs);
// Check whether any node has enough capacity for all the resources
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
@@ -336,7 +334,7 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
continue;
}
- if (have_enough_capacity(node, rscs_id, unallocated_utilization)) {
+ if (have_enough_capacity(node, rscs_id, unassigned_utilization)) {
any_capable = true;
}
@@ -353,7 +351,7 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (pcmk__node_available(node, true, false)
&& !have_enough_capacity(node, rscs_id,
- unallocated_utilization)) {
+ unassigned_utilization)) {
pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
pe__node_name(node), rscs_id);
resource_location(rsc, node, -INFINITY, "__limit_utilization__",
@@ -376,12 +374,12 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
}
}
- g_hash_table_destroy(unallocated_utilization);
+ g_hash_table_destroy(unassigned_utilization);
g_list_free(colocated_rscs);
free(rscs_id);
- pe__show_node_weights(true, rsc, "Post-utilization",
- rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(true, rsc, "Post-utilization", rsc->allowed_nodes,
+ rsc->cluster);
return most_capable_node;
}
@@ -389,21 +387,21 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
* \internal
* \brief Create a new load_stopped pseudo-op for a node
*
- * \param[in] node Node to create op for
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] node Node to create op for
*
* \return Newly created load_stopped op
*/
-static pe_action_t *
-new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_action_t *
+new_load_stopped_op(pcmk_node_t *node)
{
- char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
+ char *load_stopped_task = crm_strdup_printf(PCMK_ACTION_LOAD_STOPPED "_%s",
node->details->uname);
- pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
+ pcmk_action_t *load_stopped = get_pseudo_op(load_stopped_task,
+ node->details->data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(node);
- pe__clear_action_flags(load_stopped, pe_action_optional);
+ pe__clear_action_flags(load_stopped, pcmk_action_optional);
}
free(load_stopped_task);
return load_stopped;
@@ -417,33 +415,32 @@ new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
* \param[in] allowed_nodes List of allowed next nodes for \p rsc
*/
void
-pcmk__create_utilization_constraints(pe_resource_t *rsc,
+pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
const GList *allowed_nodes)
{
const GList *iter = NULL;
- const pe_node_t *node = NULL;
- pe_action_t *load_stopped = NULL;
+ pcmk_action_t *load_stopped = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, rsc->cluster->placement_strategy);
// "stop rsc then load_stopped" constraints for current nodes
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
- node = (const pe_node_t *) iter->data;
- load_stopped = new_load_stopped_op(node, rsc->cluster);
+ load_stopped = new_load_stopped_op(iter->data);
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, NULL, load_stopped,
- pe_order_load, rsc->cluster);
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
}
// "load_stopped then start/migrate_to rsc" constraints for allowed nodes
for (iter = allowed_nodes; iter; iter = iter->next) {
- node = (const pe_node_t *) iter->data;
- load_stopped = new_load_stopped_op(node, rsc->cluster);
+ load_stopped = new_load_stopped_op(iter->data);
pcmk__new_ordering(NULL, NULL, load_stopped, rsc, start_key(rsc), NULL,
- pe_order_load, rsc->cluster);
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
pcmk__new_ordering(NULL, NULL, load_stopped,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
- pe_order_load, rsc->cluster);
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL,
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
}
}
@@ -451,18 +448,19 @@ pcmk__create_utilization_constraints(pe_resource_t *rsc,
* \internal
* \brief Output node capacities if enabled
*
- * \param[in] desc Prefix for output
- * \param[in,out] data_set Cluster working set
+ * \param[in] desc Prefix for output
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set)
+pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
return;
}
- for (const GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
- pcmk__output_t *out = data_set->priv;
+ for (const GList *iter = scheduler->nodes;
+ iter != NULL; iter = iter->next) {
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
+ pcmk__output_t *out = scheduler->priv;
out->message(out, "node-capacity", node, desc);
}
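Editor's note (not part of the upstream patch): pcmk__compare_node_capacities() above accumulates one comparison across all utilization attributes; per the corrected per-attribute comment, a negative result indicates the first node has more free capacity. A hedged sketch of using it as a GLib sort callback (the wrapper name is hypothetical):

/* Editor's sketch only: sort nodes so those with more free capacity come first. */
static gint
cmp_nodes_by_free_capacity(gconstpointer a, gconstpointer b)
{
    return pcmk__compare_node_capacities((const pcmk_node_t *) a,
                                         (const pcmk_node_t *) b);
}

// Example use on a GList of pcmk_node_t *:
//     nodes = g_list_sort(nodes, cmp_nodes_by_free_capacity);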
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index b4e670d..31b2c36 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -14,6 +14,7 @@
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#include <glib.h>
@@ -25,7 +26,7 @@ CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
- * \brief Do deferred action checks after allocation
+ * \brief Do deferred action checks after assignment
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
@@ -39,30 +40,31 @@ CRM_TRACE_INIT_DATA(pacemaker);
* \param[in] check Type of deferred check to do
*/
static void
-check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op,
- enum pe_check_parameters check)
+check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
+ enum pcmk__check_parameters check)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
- case pe_check_active:
+ case pcmk__check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
- && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL)) {
+ && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
+ NULL)) {
reason = "action definition changed";
}
break;
- case pe_check_last_failure:
+ case pcmk__check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
rsc->cluster);
switch (digest_data->rc) {
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
- case RSC_DIGEST_MATCH:
+ case pcmk__digest_match:
break;
default:
reason = "resource parameters have changed";
@@ -86,9 +88,11 @@ check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op,
* otherwise false
*/
static bool
-failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc)
+failcount_clear_action_exists(const pcmk_node_t *node,
+ const pcmk_resource_t *rsc)
{
- GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
+ GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
+ TRUE);
if (list != NULL) {
g_list_free(list);
@@ -101,19 +105,22 @@ failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc)
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
- * \param[in,out] rsc Resource to check failure threshold for
- * \param[in] node Node to check \p rsc on
+ * \param[in,out] data Resource to check failure threshold for
+ * \param[in] user_data Node to check resource on
*/
static void
-check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
+check_failure_threshold(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
- g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
- (gpointer) node);
+ g_list_foreach(rsc->children, check_failure_threshold, user_data);
return;
+ }
- } else if (failcount_clear_action_exists(node, rsc)) {
+ if (!failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
@@ -124,10 +131,7 @@ check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
* threshold when we shouldn't. Worst case, we stop or move the
* resource, then move it back in the next transition.
*/
- return;
-
- } else {
- pe_resource_t *failed = NULL;
+ pcmk_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -INFINITY, "__fail_limit__",
@@ -145,23 +149,25 @@ check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
* exclusive, probes will only be done on nodes listed in exclusive constraints.
* This function bans the resource from the node if the node is not listed.
*
- * \param[in,out] rsc Resource to check
- * \param[in] node Node to check \p rsc on
+ * \param[in,out] data Resource to check
+ * \param[in] user_data Node to check resource on
*/
static void
-apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node)
+apply_exclusive_discovery(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+
if (rsc->exclusive_discover
|| pe__const_top_resource(rsc, false)->exclusive_discover) {
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
- g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery,
- (gpointer) node);
+ g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if ((match != NULL)
- && (match->rsc_discover_mode != pe_discover_exclusive)) {
+ && (match->rsc_discover_mode != pcmk_probe_exclusive)) {
match->weight = -INFINITY;
}
}
@@ -171,24 +177,25 @@ apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node)
* \internal
* \brief Apply stickiness to a resource if appropriate
*
- * \param[in,out] rsc Resource to check for stickiness
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource to check for stickiness
+ * \param[in] user_data Ignored
*/
static void
-apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
+apply_stickiness(gpointer data, gpointer user_data)
{
- pe_node_t *node = NULL;
+ pcmk_resource_t *rsc = data;
+ pcmk_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
- g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
+ g_list_foreach(rsc->children, apply_stickiness, NULL);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
return;
}
@@ -200,9 +207,9 @@ apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
- && (pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id) == NULL)) {
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
+ && (g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id) == NULL)) {
pe_rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and %s is not explicitly allowed",
@@ -212,23 +219,23 @@ apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
rsc->id, rsc->stickiness, pe__node_name(node));
- resource_location(rsc, node, rsc->stickiness, "stickiness", data_set);
+ resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-apply_shutdown_locks(pe_working_set_t *data_set)
+apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
return;
}
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->shutdown_lock(rsc);
}
@@ -238,25 +245,25 @@ apply_shutdown_locks(pe_working_set_t *data_set)
* \internal
* \brief Calculate the number of available nodes in the cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-count_available_nodes(pe_working_set_t *data_set)
+count_available_nodes(pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
return;
}
// @COMPAT for API backward compatibility only (cluster does not use value)
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if ((node != NULL) && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
- data_set->max_valid_nodes++;
+ scheduler->max_valid_nodes++;
}
}
- crm_trace("Online node count: %d", data_set->max_valid_nodes);
+ crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}
/*
@@ -268,112 +275,113 @@ count_available_nodes(pe_working_set_t *data_set)
* migration thresholds, and exclusive resource discovery.
*/
static void
-apply_node_criteria(pe_working_set_t *data_set)
+apply_node_criteria(pcmk_scheduler_t *scheduler)
{
crm_trace("Applying node-specific scheduling criteria");
- apply_shutdown_locks(data_set);
- count_available_nodes(data_set);
- pcmk__apply_locations(data_set);
- g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
+ apply_shutdown_locks(scheduler);
+ count_available_nodes(scheduler);
+ pcmk__apply_locations(scheduler);
+ g_list_foreach(scheduler->resources, apply_stickiness, NULL);
- for (GList *node_iter = data_set->nodes; node_iter != NULL;
+ for (GList *node_iter = scheduler->nodes; node_iter != NULL;
node_iter = node_iter->next) {
- for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
+ for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
rsc_iter = rsc_iter->next) {
- pe_node_t *node = (pe_node_t *) node_iter->data;
- pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
-
- check_failure_threshold(rsc, node);
- apply_exclusive_discovery(rsc, node);
+ check_failure_threshold(rsc_iter->data, node_iter->data);
+ apply_exclusive_discovery(rsc_iter->data, node_iter->data);
}
}
}
/*!
* \internal
- * \brief Allocate resources to nodes
+ * \brief Assign resources to nodes
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-allocate_resources(pe_working_set_t *data_set)
+assign_resources(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
- crm_trace("Allocating resources to nodes");
+ crm_trace("Assigning resources to nodes");
- if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
- pcmk__sort_resources(data_set);
+ if (!pcmk__str_eq(scheduler->placement_strategy, "default",
+ pcmk__str_casei)) {
+ pcmk__sort_resources(scheduler);
}
- pcmk__show_node_capacities("Original", data_set);
+ pcmk__show_node_capacities("Original", scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
- /* Allocate remote connection resources first (which will also allocate
- * any colocation dependencies). If the connection is migrating, always
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
+ /* Assign remote connection resources first (which will also assign any
+ * colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node) {
- pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
+ pe_rsc_trace(rsc, "Assigning remote connection resource '%s'",
rsc->id);
- rsc->cmds->assign(rsc, rsc->partial_migration_target);
+ rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
}
}
}
/* now do the rest of the resources */
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (!rsc->is_remote_node) {
- pe_rsc_trace(rsc, "Allocating %s resource '%s'",
- crm_element_name(rsc->xml), rsc->id);
- rsc->cmds->assign(rsc, NULL);
+ pe_rsc_trace(rsc, "Assigning %s resource '%s'",
+ rsc->xml->name, rsc->id);
+ rsc->cmds->assign(rsc, NULL, true);
}
}
- pcmk__show_node_capacities("Remaining", data_set);
+ pcmk__show_node_capacities("Remaining", scheduler);
}
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
- * \param[in,out] rsc Resource to check
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource to check
+ * \param[in] user_data Ignored
*/
static void
-clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
+clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ pcmk_resource_t *rsc = data;
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->children because those
- * should just be unallocated clone instances.
+ * should just be unassigned clone instances.
*/
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
- pe_action_t *clear_op = NULL;
+ for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
+ pcmk_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
- if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL) == 0) {
+ if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
continue;
}
- clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
+ clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
+ rsc->cluster);
/* We can't use order_action_then_stop() here because its
- * pe_order_preserve breaks things
+ * pcmk__ar_guest_allowed breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
- NULL, pe_order_optional, data_set);
+ NULL, pcmk__ar_ordered, rsc->cluster);
}
}
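
For readers less familiar with the GLib idiom driving this change: clear_failcounts_if_orphaned() is reshaped above so its signature matches GLib's GFunc typedef, which lets it be passed to g_list_foreach() without a cast (see schedule_resource_actions() below). A minimal sketch of that shape, using only GLib and hypothetical names:

#include <glib.h>

/* Hypothetical callback, not from this patch: GFunc takes the list element
 * and an opaque user-data pointer, both as gpointer. */
static void
visit_element(gpointer data, gpointer user_data)
{
    /* cast data back to the element type inside the callback */
}

static void
visit_all(GList *list)
{
    /* no (GFunc) cast is needed once the callback matches the typedef */
    g_list_foreach(list, visit_element, NULL);
}
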
@@ -381,28 +389,28 @@ clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
* \internal
* \brief Schedule any resource actions needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-schedule_resource_actions(pe_working_set_t *data_set)
+schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
// Process deferred action checks
- pe__foreach_param_check(data_set, check_params);
- pe__free_param_checks(data_set);
+ pe__foreach_param_check(scheduler, check_params);
+ pe__free_param_checks(scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_trace("Scheduling probes");
- pcmk__schedule_probes(data_set);
+ pcmk__schedule_probes(scheduler);
}
- if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- g_list_foreach(data_set->resources,
- (GFunc) clear_failcounts_if_orphaned, data_set);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
+ NULL);
}
crm_trace("Scheduling resource actions");
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->create_actions(rsc);
}
@@ -417,13 +425,13 @@ schedule_resource_actions(pe_working_set_t *data_set)
* \return true if resource or any descendant is managed, otherwise false
*/
static bool
-is_managed(const pe_resource_t *rsc)
+is_managed(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- if (is_managed((pe_resource_t *) iter->data)) {
+ if (is_managed((pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -434,16 +442,16 @@ is_managed(const pe_resource_t *rsc)
* \internal
* \brief Check whether any resources in the cluster are managed
*
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return true if any resource is managed, otherwise false
*/
static bool
-any_managed_resources(const pe_working_set_t *data_set)
+any_managed_resources(const pcmk_scheduler_t *scheduler)
{
- for (const GList *iter = data_set->resources;
+ for (const GList *iter = scheduler->resources;
iter != NULL; iter = iter->next) {
- if (is_managed((const pe_resource_t *) iter->data)) {
+ if (is_managed((const pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -456,16 +464,14 @@ any_managed_resources(const pe_working_set_t *data_set)
*
* \param[in] node Node to check
* \param[in] have_managed Whether any resource in cluster is managed
- * \param[in] data_set Cluster working set
*
* \return true if \p node should be fenced, otherwise false
*/
static bool
-needs_fencing(const pe_node_t *node, bool have_managed,
- const pe_working_set_t *data_set)
+needs_fencing(const pcmk_node_t *node, bool have_managed)
{
return have_managed && node->details->unclean
- && pe_can_fence(data_set, node);
+ && pe_can_fence(node->details->data_set, node);
}
/*!
@@ -477,7 +483,7 @@ needs_fencing(const pe_node_t *node, bool have_managed,
* \return true if \p node should be shut down, otherwise false
*/
static bool
-needs_shutdown(const pe_node_t *node)
+needs_shutdown(const pcmk_node_t *node)
{
if (pe__is_guest_or_remote_node(node)) {
/* Do not send shutdown actions for Pacemaker Remote nodes.
@@ -492,24 +498,24 @@ needs_shutdown(const pe_node_t *node)
* \internal
* \brief Track and order non-DC fencing
*
- * \param[in,out] list List of existing non-DC fencing actions
- * \param[in,out] action Fencing action to prepend to \p list
- * \param[in] data_set Cluster working set
+ * \param[in,out] list List of existing non-DC fencing actions
+ * \param[in,out] action Fencing action to prepend to \p list
+ * \param[in] scheduler Scheduler data
*
* \return (Possibly new) head of \p list
*/
static GList *
-add_nondc_fencing(GList *list, pe_action_t *action,
- const pe_working_set_t *data_set)
+add_nondc_fencing(GList *list, pcmk_action_t *action,
+ const pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
&& (list != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
- order_actions((pe_action_t *) list->data, action, pe_order_optional);
+ order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
}
return g_list_prepend(list, action);
}
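
A note on why the head of the returned list is "the last action in the chain", which the DC-fencing ordering later relies on: g_list_prepend() puts the newest element first. A tiny stand-alone illustration, GLib only:

#include <glib.h>

/* After repeated prepends, list->data is the most recently added item --
 * which is why fencing_ops->data is the action ordered before dc_down
 * further down. */
static void
prepend_order_demo(void)
{
    GList *chain = NULL;

    chain = g_list_prepend(chain, "fence A");
    chain = g_list_prepend(chain, "fence B");
    /* chain->data is now "fence B", the action added last */
    g_list_free(chain);
}
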
@@ -519,16 +525,15 @@ add_nondc_fencing(GList *list, pe_action_t *action,
* \brief Schedule a node for fencing
*
* \param[in,out] node Node that requires fencing
- * \param[in,out] data_set Cluster working set
*/
-static pe_action_t *
-schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_action_t *
+schedule_fencing(pcmk_node_t *node)
{
- pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
- FALSE, data_set);
+ pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
+ FALSE, node->details->data_set);
pe_warn("Scheduling node %s for fencing", pe__node_name(node));
- pcmk__order_vs_fence(fencing, data_set);
+ pcmk__order_vs_fence(fencing, node->details->data_set);
return fencing;
}
@@ -536,50 +541,52 @@ schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
* \internal
* \brief Create and order node fencing and shutdown actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
+schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
- pe_action_t *dc_down = NULL;
+ pcmk_action_t *dc_down = NULL;
bool integrity_lost = false;
- bool have_managed = any_managed_resources(data_set);
+ bool have_managed = any_managed_resources(scheduler);
GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
crm_trace("Scheduling fencing and shutdowns as needed");
if (!have_managed) {
- crm_notice("No fencing will be done until there are resources to manage");
+ crm_notice("No fencing will be done until there are resources "
+ "to manage");
}
// Check each node for whether it needs fencing or shutdown
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
- pe_action_t *fencing = NULL;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
+ pcmk_action_t *fencing = NULL;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && have_managed
- && pe_can_fence(data_set, node)) {
+ && pe_can_fence(scheduler, node)) {
pcmk__fence_guest(node);
}
continue;
}
- if (needs_fencing(node, have_managed, data_set)) {
- fencing = schedule_fencing(node, data_set);
+ if (needs_fencing(node, have_managed)) {
+ fencing = schedule_fencing(node);
// Track DC and non-DC fence actions separately
if (node->details->is_dc) {
dc_down = fencing;
} else {
- fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
+ fencing_ops = add_nondc_fencing(fencing_ops, fencing,
+ scheduler);
}
} else if (needs_shutdown(node)) {
- pe_action_t *down_op = pcmk__new_shutdown_action(node);
+ pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
// Track DC and non-DC shutdown actions separately
if (node->details->is_dc) {
@@ -597,12 +604,12 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
}
if (integrity_lost) {
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pe_warn("Resource functionality and data integrity cannot be "
"guaranteed (configure, enable, and test fencing to "
"correct this)");
- } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
crm_notice("Unclean nodes will not be fenced until quorum is "
"attained or no-quorum-policy is set to ignore");
}
@@ -616,13 +623,14 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
- if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
+ if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
+ pcmk__str_none)) {
pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
- if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
@@ -633,8 +641,8 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
- order_actions((pe_action_t *) fencing_ops->data, dc_down,
- pe_order_optional);
+ order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
+ pcmk__ar_ordered);
}
}
g_list_free(fencing_ops);
@@ -642,24 +650,23 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
}
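
The flag tests throughout this function (pcmk_sched_concurrent_fencing, pcmk_sched_fencing_enabled, and so on) are plain bitmask checks. As a conceptual stand-in only -- not Pacemaker's actual pcmk_is_set() definition -- such a test amounts to:

#include <stdbool.h>
#include <stdint.h>

/* True only if every bit of flags_to_check is present in flag_group. */
static inline bool
all_flags_set(uint64_t flag_group, uint64_t flags_to_check)
{
    return (flag_group & flags_to_check) == flags_to_check;
}
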
static void
-log_resource_details(pe_working_set_t *data_set)
+log_resource_details(pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
GList *all = NULL;
- /* We need a list of nodes that we are allowed to output information for.
- * This is necessary because out->message for all the resource-related
- * messages expects such a list, due to the `crm_mon --node=` feature. Here,
- * we just make it a list of all the nodes.
+ /* Due to the `crm_mon --node=` feature, out->message() for all the
+ * resource-related messages expects a list of nodes that we are allowed to
+ * output information for. Here, we create a wildcard to match all nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
- for (GList *item = data_set->resources; item != NULL; item = item->next) {
- pe_resource_t *rsc = (pe_resource_t *) item->data;
+ for (GList *item = scheduler->resources; item != NULL; item = item->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
// Log all resources except inactive orphans
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
- || (rsc->role != RSC_ROLE_STOPPED)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
+ || (rsc->role != pcmk_role_stopped)) {
out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
@@ -668,12 +675,12 @@ log_resource_details(pe_working_set_t *data_set)
}
static void
-log_all_actions(pe_working_set_t *data_set)
+log_all_actions(pcmk_scheduler_t *scheduler)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
- pcmk__output_t *prev_out = data_set->priv;
+ pcmk__output_t *prev_out = scheduler->priv;
pcmk__output_t *out = NULL;
if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
@@ -683,33 +690,35 @@ log_all_actions(pe_working_set_t *data_set)
pe__register_messages(out);
pcmk__register_lib_messages(out);
pcmk__output_set_log_level(out, LOG_NOTICE);
- data_set->priv = out;
+ scheduler->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
- pcmk__output_actions(data_set);
+ pcmk__output_actions(scheduler);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
- data_set->priv = prev_out;
+ scheduler->priv = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
-log_unrunnable_actions(const pe_working_set_t *data_set)
+log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
- const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
+ const uint64_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
crm_trace("Required but unrunnable actions:");
- for (const GList *iter = data_set->actions;
+ for (const GList *iter = scheduler->actions;
iter != NULL; iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
@@ -721,23 +730,23 @@ log_unrunnable_actions(const pe_working_set_t *data_set)
* \internal
* \brief Unpack the CIB for scheduling
*
- * \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
- * \param[in] flags Working set flags to set in addition to defaults
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
+ * \param[in] flags Scheduler flags to set in addition to defaults
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
+unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
const char* localhost_save = NULL;
- if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
- pe__set_working_set_flags(data_set, flags);
+ pe__set_working_set_flags(scheduler, flags);
return;
}
- if (data_set->localhost) {
- localhost_save = data_set->localhost;
+ if (scheduler->localhost) {
+ localhost_save = scheduler->localhost;
}
CRM_ASSERT(cib != NULL);
@@ -745,67 +754,67 @@ unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
- * set unless pe_flag_have_status is set (i.e. cluster_status() was
+ * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
if (localhost_save) {
- data_set->localhost = localhost_save;
+ scheduler->localhost = localhost_save;
}
- pe__set_working_set_flags(data_set, flags);
- data_set->input = cib;
- cluster_status(data_set); // Sets pe_flag_have_status
+ pe__set_working_set_flags(scheduler, flags);
+ scheduler->input = cib;
+ cluster_status(scheduler); // Sets pcmk_sched_have_status
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
- * \param[in,out] cib CIB XML to use as scheduler input
- * \param[in] flags Working set flags to set in addition to defaults
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] cib CIB XML to use as scheduler input
+ * \param[in] flags Scheduler flags to set in addition to defaults
+ * \param[in,out] scheduler Scheduler data
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- unpack_cib(cib, flags, data_set);
- pcmk__set_allocation_methods(data_set);
- pcmk__apply_node_health(data_set);
- pcmk__unpack_constraints(data_set);
- if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
+ unpack_cib(cib, flags, scheduler);
+ pcmk__set_assignment_methods(scheduler);
+ pcmk__apply_node_health(scheduler);
+ pcmk__unpack_constraints(scheduler);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
return;
}
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
- pcmk__is_daemon) {
- log_resource_details(data_set);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
+ && pcmk__is_daemon) {
+ log_resource_details(scheduler);
}
- apply_node_criteria(data_set);
+ apply_node_criteria(scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
return;
}
- pcmk__create_internal_constraints(data_set);
- pcmk__handle_rsc_config_changes(data_set);
- allocate_resources(data_set);
- schedule_resource_actions(data_set);
+ pcmk__create_internal_constraints(scheduler);
+ pcmk__handle_rsc_config_changes(scheduler);
+ assign_resources(scheduler);
+ schedule_resource_actions(scheduler);
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
- pcmk__order_remote_connection_actions(data_set);
+ pcmk__order_remote_connection_actions(scheduler);
- schedule_fencing_and_shutdowns(data_set);
- pcmk__apply_orderings(data_set);
- log_all_actions(data_set);
- pcmk__create_graph(data_set);
+ schedule_fencing_and_shutdowns(scheduler);
+ pcmk__apply_orderings(scheduler);
+ log_all_actions(scheduler);
+ pcmk__create_graph(scheduler);
if (get_crm_log_level() == LOG_TRACE) {
- log_unrunnable_actions(data_set);
+ log_unrunnable_actions(scheduler);
}
}
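
Pulling the pieces together, here is a sketch of how a caller drives this entry point, assembled from the call sites visible later in this patch (pcmk__simulate() and pcmk__output_cluster_status()); the names are simplified, error handling is omitted, and the includes are assumed to match those files:

/* Sketch only: run one scheduling pass over a CIB and clean up. */
static void
run_scheduler_once(xmlNode *cib_xml, pcmk__output_t *out)
{
    pcmk_scheduler_t *scheduler = pe_new_working_set();

    CRM_ASSERT(scheduler != NULL);
    scheduler->priv = out;  // output object used by out->message() calls

    /* unpack_cib() attaches the input and calls cluster_status() itself */
    pcmk__schedule_actions(cib_xml, pcmk_sched_no_compat, scheduler);

    /* To schedule again with new input, call pe_reset_working_set() first
     * (as profile_file() below does); the working set appears to take
     * ownership of the input XML, so don't free cib_xml separately. */
    pe_free_working_set(scheduler);
}
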
diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c
index 165c7d3..167f8a5 100644
--- a/lib/pacemaker/pcmk_simulate.c
+++ b/lib/pacemaker/pcmk_simulate.c
@@ -11,7 +11,7 @@
#include <crm/cib/internal.h>
#include <crm/common/output.h>
#include <crm/common/results.h>
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
@@ -27,7 +27,7 @@ static cib_t *fake_cib = NULL;
static GList *fake_resource_list = NULL;
static const GList *fake_op_fail_list = NULL;
-static void set_effective_date(pe_working_set_t *data_set, bool print_original,
+static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date);
/*!
@@ -41,7 +41,7 @@ static void set_effective_date(pe_working_set_t *data_set, bool print_original,
* \note It is the caller's responsibility to free the result.
*/
static char *
-create_action_name(const pe_action_t *action, bool verbose)
+create_action_name(const pcmk_action_t *action, bool verbose)
{
char *action_name = NULL;
const char *prefix = "";
@@ -51,11 +51,11 @@ create_action_name(const pe_action_t *action, bool verbose)
if (action->node != NULL) {
action_host = action->node->details->uname;
- } else if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
action_host = "<none>";
}
- if (pcmk__str_eq(action->task, RSC_CANCEL, pcmk__str_none)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
prefix = "Cancel ";
task = action->cancel_task;
}
@@ -74,8 +74,8 @@ create_action_name(const pe_action_t *action, bool verbose)
interval_ms = 0;
}
- if (pcmk__strcase_any_of(action->task, RSC_NOTIFY, RSC_NOTIFIED,
- NULL)) {
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_NOTIFIED, NULL)) {
const char *n_type = g_hash_table_lookup(action->meta,
"notify_key_type");
const char *n_task = g_hash_table_lookup(action->meta,
@@ -96,7 +96,8 @@ create_action_name(const pe_action_t *action, bool verbose)
}
free(key);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
action_name = crm_strdup_printf("%s%s '%s' %s",
@@ -127,17 +128,18 @@ create_action_name(const pe_action_t *action, bool verbose)
* \internal
* \brief Display the status of a cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in] show_opts How to modify display (as pcmk_show_opt_e flags)
* \param[in] section_opts Sections to display (as pcmk_section_e flags)
* \param[in] title What to use as list title
* \param[in] print_spacer Whether to display a spacer first
*/
static void
-print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
- uint32_t section_opts, const char *title, bool print_spacer)
+print_cluster_status(pcmk_scheduler_t *scheduler, uint32_t show_opts,
+ uint32_t section_opts, const char *title,
+ bool print_spacer)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
GList *all = NULL;
crm_exit_t stonith_rc = 0;
enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid;
@@ -150,7 +152,7 @@ print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "%s", title);
out->message(out, "cluster-status",
- data_set, state, stonith_rc, NULL,
+ scheduler, state, stonith_rc, NULL,
false, section_opts, show_opts, NULL, all, all);
out->end_list(out);
@@ -161,45 +163,45 @@ print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
* \internal
* \brief Display a summary of all actions scheduled in a transition
*
- * \param[in,out] data_set Cluster working set (fully scheduled)
+ * \param[in,out] scheduler Scheduler data (fully scheduled)
* \param[in] print_spacer Whether to display a spacer first
*/
static void
-print_transition_summary(pe_working_set_t *data_set, bool print_spacer)
+print_transition_summary(pcmk_scheduler_t *scheduler, bool print_spacer)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "Transition Summary");
- pcmk__output_actions(data_set);
+ pcmk__output_actions(scheduler);
out->end_list(out);
}
/*!
* \internal
- * \brief Reset a cluster working set's input, output, date, and flags
+ * \brief Reset scheduler input, output, date, and flags
*
- * \param[in,out] data_set Cluster working set
- * \param[in] input What to set as cluster input
- * \param[in] out What to set as cluster output object
- * \param[in] use_date What to set as cluster's current timestamp
- * \param[in] flags Cluster flags to add (pe_flag_*)
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] input What to set as cluster input
+ * \param[in] out What to set as cluster output object
+ * \param[in] use_date What to set as cluster's current timestamp
+ * \param[in] flags Group of enum pcmk_scheduler_flags to set
*/
static void
-reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
+reset(pcmk_scheduler_t *scheduler, xmlNodePtr input, pcmk__output_t *out,
const char *use_date, unsigned int flags)
{
- data_set->input = input;
- data_set->priv = out;
- set_effective_date(data_set, true, use_date);
+ scheduler->input = input;
+ scheduler->priv = out;
+ set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_sanitized)) {
- pe__set_working_set_flags(data_set, pe_flag_sanitized);
+ pe__set_working_set_flags(scheduler, pcmk_sched_sanitized);
}
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
- pe__set_working_set_flags(data_set, pe_flag_show_scores);
+ pe__set_working_set_flags(scheduler, pcmk_sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
- pe__set_working_set_flags(data_set, pe_flag_show_utilization);
+ pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization);
}
}
@@ -207,7 +209,7 @@ reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
* \brief Write out a file in dot(1) format describing the actions that will
* be taken by the scheduler in response to an input CIB file.
*
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in] dot_file The filename to write
* \param[in] all_actions Write all actions, even those that are optional
* or are on unmanaged resources
@@ -217,10 +219,10 @@ reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
* \return Standard Pacemaker return code
*/
static int
-write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
+write_sim_dotfile(pcmk_scheduler_t *scheduler, const char *dot_file,
bool all_actions, bool verbose)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
FILE *dot_strm = fopen(dot_file, "w");
if (dot_strm == NULL) {
@@ -228,30 +230,30 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
}
fprintf(dot_strm, " digraph \"g\" {\n");
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
const char *style = "dashed";
const char *font = "black";
const char *color = "black";
char *action_name = create_action_name(action, verbose);
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
font = "orange";
}
- if (pcmk_is_set(action->flags, pe_action_dumped)) {
+ if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)) {
style = "bold";
color = "green";
} else if ((action->rsc != NULL)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)) {
color = "red";
font = "purple";
if (!all_actions) {
goto do_not_write;
}
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
color = "blue";
if (!all_actions) {
goto do_not_write;
@@ -259,23 +261,23 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
} else {
color = "red";
- CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pe_action_runnable));
+ CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pcmk_action_runnable));
}
- pe__set_action_flags(action, pe_action_dumped);
+ pe__set_action_flags(action, pcmk_action_added_to_graph);
fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n",
action_name, style, color, font);
do_not_write:
free(action_name);
}
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
- GList *gIter2 = NULL;
+ for (GList *before_iter = action->actions_before;
+ before_iter != NULL; before_iter = before_iter->next) {
- for (gIter2 = action->actions_before; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_action_wrapper_t *before = (pe_action_wrapper_t *) gIter2->data;
+ pcmk__related_action_t *before = before_iter->data;
char *before_name = NULL;
char *after_name = NULL;
@@ -285,11 +287,12 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
if (before->state == pe_link_dumped) {
optional = false;
style = "bold";
- } else if (before->type == pe_order_none) {
+ } else if ((uint32_t) before->type == pcmk__ar_none) {
continue;
- } else if (pcmk_is_set(before->action->flags, pe_action_dumped)
- && pcmk_is_set(action->flags, pe_action_dumped)
- && before->type != pe_order_load) {
+ } else if (pcmk_is_set(before->action->flags,
+ pcmk_action_added_to_graph)
+ && pcmk_is_set(action->flags, pcmk_action_added_to_graph)
+ && (uint32_t) before->type != pcmk__ar_if_on_same_node_or_target) {
optional = false;
}
@@ -314,23 +317,23 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
* \brief Profile the configuration updates and scheduler actions in a single
* CIB file, printing the profiling timings.
*
- * \note \p data_set->priv must have been set to a valid \p pcmk__output_t
+ * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
- * \param[in] xml_file The CIB file to profile
- * \param[in] repeat Number of times to run
- * \param[in,out] data_set Working set for the cluster
- * \param[in] use_date The date to set the cluster's time to (may be NULL)
+ * \param[in] xml_file The CIB file to profile
+ * \param[in] repeat Number of times to run
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] use_date The date to set the cluster's time to (may be NULL)
*/
static void
-profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
- const char *use_date)
+profile_file(const char *xml_file, long long repeat,
+ pcmk_scheduler_t *scheduler, const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
xmlNode *cib_object = NULL;
clock_t start = 0;
clock_t end;
- unsigned long long data_set_flags = pe_flag_no_compat;
+ unsigned long long scheduler_flags = pcmk_sched_no_compat;
CRM_ASSERT(out != NULL);
@@ -351,20 +354,20 @@ profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
- data_set_flags |= pe_flag_show_scores;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
+ scheduler_flags |= pcmk_sched_output_scores;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- data_set_flags |= pe_flag_show_utilization;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
+ scheduler_flags |= pcmk_sched_show_utilization;
}
for (int i = 0; i < repeat; ++i) {
xmlNode *input = (repeat == 1)? cib_object : copy_xml(cib_object);
- data_set->input = input;
- set_effective_date(data_set, false, use_date);
- pcmk__schedule_actions(input, data_set_flags, data_set);
- pe_reset_working_set(data_set);
+ scheduler->input = input;
+ set_effective_date(scheduler, false, use_date);
+ pcmk__schedule_actions(input, scheduler_flags, scheduler);
+ pe_reset_working_set(scheduler);
}
end = clock();
@@ -372,10 +375,10 @@ profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
}
void
-pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
- const char *use_date)
+pcmk__profile_dir(const char *dir, long long repeat,
+ pcmk_scheduler_t *scheduler, const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
struct dirent **namelist;
int file_num = scandir(dir, &namelist, 0, alphasort);
@@ -398,9 +401,10 @@ pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
free(namelist[file_num]);
continue;
}
- snprintf(buffer, sizeof(buffer), "%s/%s", dir, namelist[file_num]->d_name);
+ snprintf(buffer, sizeof(buffer), "%s/%s",
+ dir, namelist[file_num]->d_name);
if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
- profile_file(buffer, repeat, data_set, use_date);
+ profile_file(buffer, repeat, scheduler, use_date);
}
free(namelist[file_num]);
}
@@ -414,37 +418,37 @@ pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
* \brief Set the date of the cluster, either to the value given by
* \p use_date, or to the "execution-date" value in the CIB.
*
- * \note \p data_set->priv must have been set to a valid \p pcmk__output_t
+ * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in] print_original If \p true, the "execution-date" should
* also be printed
* \param[in] use_date The date to set the cluster's time to
* (may be NULL)
*/
static void
-set_effective_date(pe_working_set_t *data_set, bool print_original,
+set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
time_t original_date = 0;
CRM_ASSERT(out != NULL);
- crm_element_value_epoch(data_set->input, "execution-date", &original_date);
+ crm_element_value_epoch(scheduler->input, "execution-date", &original_date);
if (use_date) {
- data_set->now = crm_time_new(use_date);
+ scheduler->now = crm_time_new(use_date);
out->info(out, "Setting effective cluster time: %s", use_date);
- crm_time_log(LOG_NOTICE, "Pretending 'now' is", data_set->now,
+ crm_time_log(LOG_NOTICE, "Pretending 'now' is", scheduler->now,
crm_time_log_date | crm_time_log_timeofday);
} else if (original_date != 0) {
- data_set->now = pcmk__copy_timet(original_date);
+ scheduler->now = pcmk__copy_timet(original_date);
if (print_original) {
- char *when = crm_time_as_string(data_set->now,
+ char *when = crm_time_as_string(scheduler->now,
crm_time_log_date|crm_time_log_timeofday);
out->info(out, "Using the original execution date of: %s", when);
@@ -543,7 +547,8 @@ simulate_resource_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
}
// Certain actions need to be displayed but don't need history entries
- if (pcmk__strcase_any_of(operation, "delete", RSC_METADATA, NULL)) {
+ if (pcmk__strcase_any_of(operation, PCMK_ACTION_DELETE,
+ PCMK_ACTION_META_DATA, NULL)) {
out->message(out, "inject-rsc-action", resource, operation, node,
(guint) 0);
goto done; // Confirm action and update graph
@@ -684,7 +689,7 @@ simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
out->message(out, "inject-fencing-action", target, op);
- if (!pcmk__str_eq(op, "on", pcmk__str_casei)) {
+ if (!pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
int rc = pcmk_ok;
GString *xpath = g_string_sized_new(512);
@@ -725,7 +730,7 @@ simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
}
enum pcmk__graph_status
-pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
+pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib,
const GList *op_fail_list)
{
pcmk__graph_t *transition = NULL;
@@ -738,7 +743,7 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
simulate_fencing_action,
};
- out = data_set->priv;
+ out = scheduler->priv;
fake_cib = cib;
fake_op_fail_list = op_fail_list;
@@ -748,10 +753,10 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
}
pcmk__set_graph_functions(&simulation_fns);
- transition = pcmk__unpack_graph(data_set->graph, crm_system_name);
+ transition = pcmk__unpack_graph(scheduler->graph, crm_system_name);
pcmk__log_graph(LOG_DEBUG, transition);
- fake_resource_list = data_set->resources;
+ fake_resource_list = scheduler->resources;
do {
graph_rc = pcmk__execute_graph(transition);
} while (graph_rc == pcmk__graph_active);
@@ -772,15 +777,15 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
cib_sync_call|cib_scope_local);
CRM_ASSERT(rc == pcmk_ok);
- pe_reset_working_set(data_set);
- data_set->input = cib_object;
+ pe_reset_working_set(scheduler);
+ scheduler->input = cib_object;
out->end_list(out);
}
return graph_rc;
}
int
-pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
+pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
const pcmk_injections_t *injections, unsigned int flags,
uint32_t section_opts, const char *use_date,
const char *input_file, const char *graph_file,
@@ -796,8 +801,8 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- reset(data_set, input, out, use_date, flags);
- cluster_status(data_set);
+ reset(scheduler, input, out, use_date, flags);
+ cluster_status(scheduler);
if ((cib->variant == cib_native)
&& pcmk_is_set(section_opts, pcmk_section_times)) {
@@ -805,29 +810,30 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
// Currently used only in the times section
pcmk__query_node_name(out, 0, &pcmk__our_nodename, 0);
}
- data_set->localhost = pcmk__our_nodename;
+ scheduler->localhost = pcmk__our_nodename;
}
if (!out->is_quiet(out)) {
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- printed = out->message(out, "maint-mode", data_set->flags);
+ const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending);
+
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ printed = out->message(out, "maint-mode", scheduler->flags);
}
- if (data_set->disabled_resources || data_set->blocked_resources) {
+ if (scheduler->disabled_resources || scheduler->blocked_resources) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
printed = out->info(out,
"%d of %d resource instances DISABLED and "
"%d BLOCKED from further action due to failure",
- data_set->disabled_resources,
- data_set->ninstances,
- data_set->blocked_resources);
+ scheduler->disabled_resources,
+ scheduler->ninstances,
+ scheduler->blocked_resources);
}
/* Most formatted output headers use caps for each word, but this one
* only has the first word capitalized for compatibility with pcs.
*/
- print_cluster_status(data_set,
- pcmk_is_set(flags, pcmk_sim_show_pending)? pcmk_show_pending : 0,
+ print_cluster_status(scheduler, (show_pending? pcmk_show_pending : 0),
section_opts, "Current cluster status",
(printed == pcmk_rc_ok));
printed = pcmk_rc_ok;
@@ -845,7 +851,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
|| (injections->watchdog != NULL)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- pcmk__inject_scheduler_input(data_set, cib, injections);
+ pcmk__inject_scheduler_input(scheduler, cib, injections);
printed = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &input, cib_sync_call);
@@ -854,9 +860,9 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- cleanup_calculations(data_set);
- reset(data_set, input, out, use_date, flags);
- cluster_status(data_set);
+ cleanup_calculations(scheduler);
+ reset(scheduler, input, out, use_date, flags);
+ cluster_status(scheduler);
}
if (input_file != NULL) {
@@ -869,28 +875,29 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
if (pcmk_any_flags_set(flags, pcmk_sim_process | pcmk_sim_simulate)) {
pcmk__output_t *logger_out = NULL;
- unsigned long long data_set_flags = pe_flag_no_compat;
+ unsigned long long scheduler_flags = pcmk_sched_no_compat;
- if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
- data_set_flags |= pe_flag_show_scores;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
+ scheduler_flags |= pcmk_sched_output_scores;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- data_set_flags |= pe_flag_show_utilization;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
+ scheduler_flags |= pcmk_sched_show_utilization;
}
- if (pcmk_all_flags_set(data_set->flags,
- pe_flag_show_scores|pe_flag_show_utilization)) {
+ if (pcmk_all_flags_set(scheduler->flags,
+ pcmk_sched_output_scores
+ |pcmk_sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL,
- "Allocation Scores and Utilization Information");
+ "Assignment Scores and Utilization Information");
printed = pcmk_rc_ok;
- } else if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- out->begin_list(out, NULL, NULL, "Allocation Scores");
+ out->begin_list(out, NULL, NULL, "Assignment Scores");
printed = pcmk_rc_ok;
- } else if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL, "Utilization Information");
printed = pcmk_rc_ok;
@@ -902,23 +909,23 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
pe__register_messages(logger_out);
pcmk__register_lib_messages(logger_out);
- data_set->priv = logger_out;
+ scheduler->priv = logger_out;
}
- pcmk__schedule_actions(input, data_set_flags, data_set);
+ pcmk__schedule_actions(input, scheduler_flags, scheduler);
if (logger_out == NULL) {
out->end_list(out);
} else {
logger_out->finish(logger_out, CRM_EX_OK, true, NULL);
pcmk__output_free(logger_out);
- data_set->priv = out;
+ scheduler->priv = out;
}
input = NULL; /* Don't try and free it twice */
if (graph_file != NULL) {
- rc = write_xml_file(data_set->graph, graph_file, FALSE);
+ rc = write_xml_file(scheduler->graph, graph_file, FALSE);
if (rc < 0) {
rc = pcmk_rc_graph_error;
goto simulate_done;
@@ -926,7 +933,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
if (dot_file != NULL) {
- rc = write_sim_dotfile(data_set, dot_file,
+ rc = write_sim_dotfile(scheduler, dot_file,
pcmk_is_set(flags, pcmk_sim_all_actions),
pcmk_is_set(flags, pcmk_sim_verbose));
if (rc != pcmk_rc_ok) {
@@ -936,7 +943,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
if (!out->is_quiet(out)) {
- print_transition_summary(data_set, printed == pcmk_rc_ok);
+ print_transition_summary(scheduler, printed == pcmk_rc_ok);
}
}
@@ -947,8 +954,8 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- if (pcmk__simulate_transition(data_set, cib,
- injections->op_fail) != pcmk__graph_complete) {
+ if (pcmk__simulate_transition(scheduler, cib, injections->op_fail)
+ != pcmk__graph_complete) {
rc = pcmk_rc_invalid_transition;
}
@@ -956,17 +963,17 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- set_effective_date(data_set, true, use_date);
+ set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
- pe__set_working_set_flags(data_set, pe_flag_show_scores);
+ pe__set_working_set_flags(scheduler, pcmk_sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
- pe__set_working_set_flags(data_set, pe_flag_show_utilization);
+ pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization);
}
- cluster_status(data_set);
- print_cluster_status(data_set, 0, section_opts, "Revised Cluster Status",
+ cluster_status(scheduler);
+ print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
true);
simulate_done:
@@ -975,7 +982,7 @@ simulate_done:
}
int
-pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set,
+pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler,
const pcmk_injections_t *injections, unsigned int flags,
unsigned int section_opts, const char *use_date,
const char *input_file, const char *graph_file,
@@ -992,7 +999,7 @@ pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set,
pe__register_messages(out);
pcmk__register_lib_messages(out);
- rc = pcmk__simulate(data_set, out, injections, flags, section_opts,
+ rc = pcmk__simulate(scheduler, out, injections, flags, section_opts,
use_date, input_file, graph_file, dot_file);
pcmk__xml_output_finish(out, xml);
return rc;
diff --git a/lib/pacemaker/pcmk_status.c b/lib/pacemaker/pcmk_status.c
index 0e82633..77b6c90 100644
--- a/lib/pacemaker/pcmk_status.c
+++ b/lib/pacemaker/pcmk_status.c
@@ -17,6 +17,7 @@
#include <crm/common/output.h>
#include <crm/common/results.h>
#include <crm/fencing/internal.h>
+#include <crm/pengine/internal.h>
#include <crm/stonith-ng.h>
#include <pacemaker.h>
#include <pacemaker-internal.h>
@@ -79,7 +80,7 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
xmlNode *cib_copy = copy_xml(current_cib);
stonith_history_t *stonith_history = NULL;
int history_rc = 0;
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
GList *unames = NULL;
GList *resources = NULL;
@@ -99,42 +100,43 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
fence_history);
}
- data_set = pe_new_working_set();
- CRM_ASSERT(data_set != NULL);
- pe__set_working_set_flags(data_set, pe_flag_no_compat);
+ scheduler = pe_new_working_set();
+ CRM_ASSERT(scheduler != NULL);
+ pe__set_working_set_flags(scheduler, pcmk_sched_no_compat);
- data_set->input = cib_copy;
- data_set->priv = out;
- cluster_status(data_set);
+ scheduler->input = cib_copy;
+ scheduler->priv = out;
+ cluster_status(scheduler);
if ((cib->variant == cib_native) && pcmk_is_set(show, pcmk_section_times)) {
if (pcmk__our_nodename == NULL) {
// Currently used only in the times section
pcmk__query_node_name(out, 0, &pcmk__our_nodename, 0);
}
- data_set->localhost = pcmk__our_nodename;
+ scheduler->localhost = pcmk__our_nodename;
}
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
* and bans need negative location constraints) */
- if (pcmk_is_set(show, pcmk_section_bans) || pcmk_is_set(show, pcmk_section_tickets)) {
- pcmk__unpack_constraints(data_set);
+ if (pcmk_is_set(show, pcmk_section_bans)
+ || pcmk_is_set(show, pcmk_section_tickets)) {
+ pcmk__unpack_constraints(scheduler);
}
- unames = pe__build_node_name_list(data_set, only_node);
- resources = pe__build_rsc_list(data_set, only_rsc);
+ unames = pe__build_node_name_list(scheduler, only_node);
+ resources = pe__build_rsc_list(scheduler, only_rsc);
/* Always print DC if NULL. */
- if (data_set->dc_node == NULL) {
+ if (scheduler->dc_node == NULL) {
show |= pcmk_section_dc;
}
if (simple_output) {
- rc = pcmk__output_simple_status(out, data_set);
+ rc = pcmk__output_simple_status(out, scheduler);
} else {
out->message(out, "cluster-status",
- data_set, pcmkd_state, pcmk_rc2exitc(history_rc),
+ scheduler, pcmkd_state, pcmk_rc2exitc(history_rc),
stonith_history, fence_history, show, show_opts,
neg_location_prefix, unames, resources);
}
@@ -144,7 +146,7 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
stonith_history_free(stonith_history);
stonith_history = NULL;
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
@@ -155,7 +157,9 @@ pcmk_status(xmlNodePtr *xml)
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
- uint32_t show_opts = pcmk_show_pending | pcmk_show_inactive_rscs | pcmk_show_timing;
+ uint32_t show_opts = pcmk_show_pending
+ |pcmk_show_inactive_rscs
+ |pcmk_show_timing;
cib = cib_new();
@@ -286,33 +290,41 @@ done:
return pcmk_rc_ok;
}
-/* This is an internal-only function that is planned to be deprecated and removed.
- * It should only ever be called from crm_mon.
+/*!
+ * \internal
+ * \brief Output cluster status in Nagios Plugin format
+ *
+ * \param[in,out] out Output object
+ * \param[in] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
+ * \note This is for a deprecated crm_mon option and should be called only for
+ * that.
*/
int
pcmk__output_simple_status(pcmk__output_t *out,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
int nodes_online = 0;
int nodes_standby = 0;
- int nodes_maintenance = 0;
+ int nodes_maint = 0;
GString *offline_nodes = NULL;
bool no_dc = false;
bool offline = false;
bool has_warnings = false;
- if (data_set->dc_node == NULL) {
+ if (scheduler->dc_node == NULL) {
has_warnings = true;
no_dc = true;
}
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if (node->details->standby && node->details->online) {
nodes_standby++;
} else if (node->details->maintenance && node->details->online) {
- nodes_maintenance++;
+ nodes_maint++;
} else if (node->details->online) {
nodes_online++;
} else {
@@ -338,14 +350,15 @@ pcmk__output_simple_status(pcmk__output_t *out,
char *nodes_maint_s = NULL;
if (nodes_standby > 0) {
- nodes_standby_s = crm_strdup_printf(", %d standby node%s", nodes_standby,
+ nodes_standby_s = crm_strdup_printf(", %d standby node%s",
+ nodes_standby,
pcmk__plural_s(nodes_standby));
}
- if (nodes_maintenance > 0) {
+ if (nodes_maint > 0) {
nodes_maint_s = crm_strdup_printf(", %d maintenance node%s",
- nodes_maintenance,
- pcmk__plural_s(nodes_maintenance));
+ nodes_maint,
+ pcmk__plural_s(nodes_maint));
}
out->info(out, "CLUSTER OK: %d node%s online%s%s, "
@@ -353,7 +366,7 @@ pcmk__output_simple_status(pcmk__output_t *out,
nodes_online, pcmk__plural_s(nodes_online),
nodes_standby_s != NULL ? nodes_standby_s : "",
nodes_maint_s != NULL ? nodes_maint_s : "",
- data_set->ninstances, pcmk__plural_s(data_set->ninstances));
+ scheduler->ninstances, pcmk__plural_s(scheduler->ninstances));
free(nodes_standby_s);
free(nodes_maint_s);
diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
index c2a8c90..9ffc745 100644
--- a/lib/pengine/Makefile.am
+++ b/lib/pengine/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -15,27 +15,33 @@ include $(top_srcdir)/mk/common.mk
SUBDIRS = . tests
## libraries
-lib_LTLIBRARIES = libpe_rules.la libpe_status.la
-check_LTLIBRARIES = libpe_rules_test.la libpe_status_test.la
+lib_LTLIBRARIES = libpe_rules.la \
+ libpe_status.la
+check_LTLIBRARIES = libpe_rules_test.la \
+ libpe_status_test.la
-## SOURCES
-noinst_HEADERS = variant.h pe_status_private.h
+noinst_HEADERS = pe_status_private.h
-libpe_rules_la_LDFLAGS = -version-info 30:0:4
+libpe_rules_la_LDFLAGS = -version-info 30:1:4
libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-libpe_rules_la_SOURCES = rules.c rules_alerts.c common.c
-libpe_status_la_LDFLAGS = -version-info 34:0:6
+## Library sources (*must* use += format for bumplibs)
+libpe_rules_la_SOURCES = common.c
+libpe_rules_la_SOURCES += rules.c
+libpe_rules_la_SOURCES += rules_alerts.c
+
+libpe_status_la_LDFLAGS = -version-info 35:0:7
libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_status_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+
+## Library sources (*must* use += format for bumplibs)
libpe_status_la_SOURCES =
libpe_status_la_SOURCES += bundle.c
libpe_status_la_SOURCES += clone.c
@@ -64,18 +70,26 @@ libpe_status_la_SOURCES += utils.c
include $(top_srcdir)/mk/tap.mk
libpe_rules_test_la_SOURCES = $(libpe_rules_la_SOURCES)
-libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) -DPCMK__UNIT_TESTING \
+libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
-fno-builtin -fno-inline
-libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
+libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
libpe_status_test_la_SOURCES = $(libpe_status_la_SOURCES)
-libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) -DPCMK__UNIT_TESTING \
- -fno-builtin -fno-inline
-libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
-
-clean-generic:
- rm -f *.log *.debug *~
+libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
+ -fno-builtin \
+ -fno-inline
+libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
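
A brief aside on the -version-info bumps above, since they encode the ABI impact of this release: by libtool convention the triple is current:revision:age, where a code-only change bumps revision (libpe_rules, 30:0:4 to 30:1:4) and added interfaces bump current and age while resetting revision (libpe_status, 34:0:6 to 35:0:7).
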
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index ff1b365..fd859d5 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -20,8 +20,69 @@
#include <crm/common/xml_internal.h>
#include <pe_status_private.h>
-#define PE__VARIANT_BUNDLE 1
-#include "./variant.h"
+enum pe__bundle_mount_flags {
+ pe__bundle_mount_none = 0x00,
+
+ // mount instance-specific subdirectory rather than source directly
+ pe__bundle_mount_subdir = 0x01
+};
+
+typedef struct {
+ char *source;
+ char *target;
+ char *options;
+ uint32_t flags; // bitmask of pe__bundle_mount_flags
+} pe__bundle_mount_t;
+
+typedef struct {
+ char *source;
+ char *target;
+} pe__bundle_port_t;
+
+enum pe__container_agent {
+ PE__CONTAINER_AGENT_UNKNOWN,
+ PE__CONTAINER_AGENT_DOCKER,
+ PE__CONTAINER_AGENT_RKT,
+ PE__CONTAINER_AGENT_PODMAN,
+};
+
+#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
+#define PE__CONTAINER_AGENT_DOCKER_S "docker"
+#define PE__CONTAINER_AGENT_RKT_S "rkt"
+#define PE__CONTAINER_AGENT_PODMAN_S "podman"
+
+typedef struct pe__bundle_variant_data_s {
+ int promoted_max;
+ int nreplicas;
+ int nreplicas_per_host;
+ char *prefix;
+ char *image;
+ const char *ip_last;
+ char *host_network;
+ char *host_netmask;
+ char *control_port;
+ char *container_network;
+ char *ip_range_start;
+ gboolean add_host;
+ gchar *container_host_options;
+ char *container_command;
+ char *launcher_options;
+ const char *attribute_target;
+
+ pcmk_resource_t *child;
+
+ GList *replicas; // pe__bundle_replica_t *
+ GList *ports; // pe__bundle_port_t *
+ GList *mounts; // pe__bundle_mount_t *
+
+ enum pe__container_agent agent_type;
+} pe__bundle_variant_data_t;
+
+#define get_bundle_variant_data(data, rsc) \
+ CRM_ASSERT(rsc != NULL); \
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle); \
+ CRM_ASSERT(rsc->variant_opaque != NULL); \
+ data = (pe__bundle_variant_data_t *) rsc->variant_opaque;
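
One usage note on the macro just moved here from the removed variant.h: it expands to several statements rather than a do { } while (0) block, so it must be used as a standalone statement, with braces around any enclosing conditional. A hypothetical accessor showing the intended call pattern -- it duplicates what pe__bundle_max() below already does and exists only for illustration:

static int
bundle_replica_count(const pcmk_resource_t *bundle)
{
    pe__bundle_variant_data_t *data = NULL;

    get_bundle_variant_data(data, bundle);  // multi-statement macro; keep it
                                            // as its own statement
    return data->nreplicas;
}
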
/*!
* \internal
@@ -32,7 +93,7 @@
* \return Maximum replicas for bundle corresponding to \p rsc
*/
int
-pe__bundle_max(const pe_resource_t *rsc)
+pe__bundle_max(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
@@ -42,19 +103,149 @@ pe__bundle_max(const pe_resource_t *rsc)
/*!
* \internal
- * \brief Get maximum number of bundle replicas allowed to run on one node
+ * \brief Get the resource inside a bundle
*
- * \param[in] rsc Bundle or bundled resource to check
+ * \param[in] bundle Bundle to check
*
- * \return Maximum replicas per node for bundle corresponding to \p rsc
+ * \return Resource inside \p bundle if any, otherwise NULL
*/
-int
-pe__bundle_max_per_node(const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__bundled_resource(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
- return bundle_data->nreplicas_per_host;
+ return bundle_data->child;
+}
+
+/*!
+ * \internal
+ * \brief Get containerized resource corresponding to a given bundle container
+ *
+ * \param[in] instance Collective instance that might be a bundle container
+ *
+ * \return Bundled resource instance inside \p instance if it is a bundle
+ * container instance, otherwise NULL
+ */
+const pcmk_resource_t *
+pe__get_rsc_in_container(const pcmk_resource_t *instance)
+{
+ const pe__bundle_variant_data_t *data = NULL;
+ const pcmk_resource_t *top = pe__const_top_resource(instance, true);
+
+ if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) {
+ return NULL;
+ }
+ get_bundle_variant_data(data, top);
+
+ for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
+ const pe__bundle_replica_t *replica = iter->data;
+
+ if (instance == replica->container) {
+ return replica->child;
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given node is created by a bundle
+ *
+ * \param[in] bundle Bundle resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p node is an instance of \p bundle, otherwise false
+ */
+bool
+pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
+ const pcmk_node_t *node)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+
+ if (pe__same_node(node, replica->node)) {
+ return true;
+ }
+ }
+ return false;
+}
+
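A sketch of how a caller might use this check (illustrative only, not part of the patch; node->details->uname is the existing node name field used elsewhere in this file):

    if (pe__node_is_bundle_instance(bundle, node)) {
        pe_rsc_trace(bundle, "%s is a replica node created by bundle %s",
                     node->details->uname, bundle->id);
    }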
+/*!
+ * \internal
+ * \brief Get the container of a bundle's first replica
+ *
+ * \param[in] bundle Bundle resource to get container for
+ *
+ * \return Container resource from first replica of \p bundle if any,
+ * otherwise NULL
+ */
+pcmk_resource_t *
+pe__first_container(const pcmk_resource_t *bundle)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+ const pe__bundle_replica_t *replica = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ if (bundle_data->replicas == NULL) {
+ return NULL;
+ }
+ replica = bundle_data->replicas->data;
+ return replica->container;
+}
+
+/*!
+ * \internal
+ * \brief Iterate over bundle replicas
+ *
+ * \param[in,out] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_bundle_replica(pcmk_resource_t *bundle,
+ bool (*fn)(pe__bundle_replica_t *, void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ if (!fn((pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Iterate over const bundle replicas
+ *
+ * \param[in] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
+ bool (*fn)(const pe__bundle_replica_t *,
+ void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (const GList *iter = bundle_data->replicas; iter != NULL;
+ iter = iter->next) {
+
+ if (!fn((const pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
}
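A minimal sketch of driving the new replica iterators (illustrative only; the callback name and the counting logic are hypothetical):

// Hypothetical callback: count replicas that have a container resource
static bool
example_count_containers(pe__bundle_replica_t *replica, void *user_data)
{
    unsigned int *count = user_data;

    if (replica->container != NULL) {
        ++(*count);
    }
    return true; // continue iterating
}

// Caller, with a pcmk_resource_t *bundle in hand:
unsigned int n_containers = 0;

pe__foreach_bundle_replica(bundle, example_count_containers, &n_containers);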
static char *
@@ -159,7 +350,8 @@ valid_network(pe__bundle_variant_data_t *data)
if(data->nreplicas_per_host > 1) {
pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
data->nreplicas_per_host = 1;
- // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
+ // @TODO to be sure:
+ // pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
return TRUE;
}
@@ -167,7 +359,7 @@ valid_network(pe__bundle_variant_data_t *data)
}
static int
-create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if(data->ip_range_start) {
@@ -198,7 +390,8 @@ create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
xml_obj = create_xml_node(xml_ip, "operations");
- crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_ip), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
@@ -226,7 +419,7 @@ container_agent_str(enum pe__container_agent t)
}
static int
-create_container_resource(pe_resource_t *parent,
+create_container_resource(pcmk_resource_t *parent,
const pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
@@ -295,11 +488,11 @@ create_container_resource(pe_resource_t *parent,
}
if (data->control_port != NULL) {
- pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
- data->control_port, NULL);
+ pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=",
+ data->control_port, NULL);
} else {
- g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
- DEFAULT_REMOTE_PORT);
+ g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d",
+ env_opt, DEFAULT_REMOTE_PORT);
}
for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
@@ -449,14 +642,15 @@ create_container_resource(pe_resource_t *parent,
}
xml_obj = create_xml_node(xml_container, "operations");
- crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_container), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (pe__unpack_resource(xml_container, &replica->container, parent,
parent->cluster) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
- pe__set_resource_flags(replica->container, pe_rsc_replica_container);
+ pe__set_resource_flags(replica->container, pcmk_rsc_replica_container);
parent->children = g_list_append(parent->children, replica->container);
return pcmk_rc_ok;
@@ -469,13 +663,13 @@ create_container_resource(pe_resource_t *parent,
* \param[in] uname Name of node to ban
*/
static void
-disallow_node(pe_resource_t *rsc, const char *uname)
+disallow_node(pcmk_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
- ((pe_node_t *) match)->weight = -INFINITY;
- ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
+ ((pcmk_node_t *) match)->weight = -INFINITY;
+ ((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never;
}
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
@@ -483,12 +677,12 @@ disallow_node(pe_resource_t *rsc, const char *uname)
}
static int
-create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
@@ -527,8 +721,8 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
- * need something that will get freed during data set cleanup to use as
- * the node ID and uname.
+ * need something that will get freed during scheduler data cleanup to
+ * use as the node ID and uname.
*/
free(id);
id = NULL;
@@ -545,12 +739,12 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
} else {
node->weight = -INFINITY;
}
- node->rsc_discover_mode = pe_discover_never;
+ node->rsc_discover_mode = pcmk_probe_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
- * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
- * Unfortunately, a bundle has to be mostly unpacked before it's obvious
- * what nodes will be needed, so we do it just above.
+ * has a pcmk_node_t entry. Ideally, it would do the same for bundle
+ * nodes. Unfortunately, a bundle has to be mostly unpacked before it's
+ * obvious what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
@@ -569,7 +763,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
replica->node = pe__copy_node(node);
replica->node->weight = 500;
- replica->node->rsc_discover_mode = pe_discover_exclusive;
+ replica->node->rsc_discover_mode = pcmk_probe_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
@@ -581,7 +775,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__copy_node(replica->node));
{
- pe_node_t *copy = pe__copy_node(replica->node);
+ pcmk_node_t *copy = pe__copy_node(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
@@ -625,7 +819,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
static int
-create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
int rc = pcmk_rc_ok;
@@ -658,7 +852,8 @@ create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
- pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
+ pe__set_resource_flags(replica->remote,
+ pcmk_rsc_remote_nesting_allowed);
}
return rc;
}
@@ -695,9 +890,9 @@ port_free(pe__bundle_port_t *port)
}
static pe__bundle_replica_t *
-replica_for_remote(pe_resource_t *remote)
+replica_for_remote(pcmk_resource_t *remote)
{
- pe_resource_t *top = remote;
+ pcmk_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
@@ -722,7 +917,7 @@ replica_for_remote(pe_resource_t *remote)
}
bool
-pe__bundle_needs_remote_name(pe_resource_t *rsc)
+pe__bundle_needs_remote_name(pcmk_resource_t *rsc)
{
const char *value;
GHashTable *params = NULL;
@@ -740,12 +935,12 @@ pe__bundle_needs_remote_name(pe_resource_t *rsc)
}
const char *
-pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
+pe__add_bundle_remote_name(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
@@ -786,7 +981,7 @@ pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
} while (0)
gboolean
-pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
+pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
@@ -819,7 +1014,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
// Use 0 for default, minimum, and invalid promoted-max
- value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
+ value = crm_element_value(xml_obj, PCMK_META_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, "masters");
@@ -842,7 +1037,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
value = crm_element_value(xml_obj, "replicas-per-host");
pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
if (bundle_data->nreplicas_per_host == 1) {
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
@@ -934,13 +1129,11 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->nreplicas);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value);
free(value);
value = pcmk__itoa(bundle_data->nreplicas_per_host);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_NODEMAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
@@ -951,8 +1144,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->promoted_max);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_PROMOTED_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value);
free(value);
}
@@ -972,7 +1164,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
GString *buffer = NULL;
if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return FALSE;
}
@@ -1033,8 +1225,8 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
- if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
- pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
+ if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) {
+ pe__set_resource_flags(bundle_data->child, pcmk_rsc_notify);
}
allocate_ip(bundle_data, replica, buffer);
@@ -1109,7 +1301,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
static int
-replica_resource_active(pe_resource_t *rsc, gboolean all)
+replica_resource_active(pcmk_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->fns->active(rsc, all);
@@ -1124,7 +1316,7 @@ replica_resource_active(pe_resource_t *rsc, gboolean all)
}
gboolean
-pe__bundle_active(pe_resource_t *rsc, gboolean all)
+pe__bundle_active(pcmk_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GList *iter = NULL;
@@ -1171,8 +1363,8 @@ pe__bundle_active(pe_resource_t *rsc, gboolean all)
*
* \return Bundle replica if found, NULL otherwise
*/
-pe_resource_t *
-pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
+pcmk_resource_t *
+pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
@@ -1195,7 +1387,7 @@ pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
* \deprecated This function will be removed in a future release
*/
static void
-print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
+print_rsc_in_list(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
if (rsc != NULL) {
@@ -1214,7 +1406,7 @@ print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
static void
-bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+bundle_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1232,9 +1424,10 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
status_print("image=\"%s\" ", bundle_data->image);
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print(">\n");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
@@ -1254,12 +1447,13 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1313,10 +1507,11 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
"id", rsc->id,
"type", container_agent_str(bundle_data->agent_type),
"image", bundle_data->image,
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
}
@@ -1358,9 +1553,9 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- pe_resource_t *rsc = replica->child;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1394,23 +1589,24 @@ pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replic
* otherwise unmanaged, or an empty string otherwise
*/
static const char *
-get_unmanaged_str(const pe_resource_t *rsc)
+get_unmanaged_str(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
return " (maintenance)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return " (unmanaged)";
}
return "";
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1460,7 +1656,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1497,7 +1693,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1512,9 +1708,9 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- const pe_resource_t *rsc = replica->child;
+ const pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1538,12 +1734,13 @@ pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replic
pe__common_output_text(out, rsc, buffer, node, show_opts);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1593,7 +1790,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1630,7 +1827,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1651,8 +1848,8 @@ static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
long options, void *print_data)
{
- pe_node_t *node = NULL;
- pe_resource_t *rsc = replica->child;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1682,7 +1879,7 @@ print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
* \deprecated This function will be removed in a future release
*/
void
-pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
+pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1703,8 +1900,8 @@ pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sContainer bundle%s: %s [%s]%s%s\n",
pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("<br />\n<ul>\n");
}
@@ -1784,7 +1981,7 @@ free_bundle_replica(pe__bundle_replica_t *replica)
}
void
-pe__free_bundle(pe_resource_t *rsc)
+pe__free_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
@@ -1818,9 +2015,9 @@ pe__free_bundle(pe_resource_t *rsc)
}
enum rsc_role_e
-pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
+pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current)
{
- enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e container_role = pcmk_role_unknown;
return container_role;
}
@@ -1832,9 +2029,9 @@ pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
* \return Number of configured replicas, or 0 on error
*/
int
-pe_bundle_replicas(const pe_resource_t *rsc)
+pe_bundle_replicas(const pcmk_resource_t *rsc)
{
- if ((rsc == NULL) || (rsc->variant != pe_container)) {
+ if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) {
return 0;
} else {
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1845,7 +2042,7 @@ pe_bundle_replicas(const pe_resource_t *rsc)
}
void
-pe__count_bundle(pe_resource_t *rsc)
+pe__count_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1869,7 +2066,7 @@ pe__count_bundle(pe_resource_t *rsc)
}
gboolean
-pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1913,7 +2110,7 @@ pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
* g_list_free().
*/
GList *
-pe__bundle_containers(const pe_resource_t *bundle)
+pe__bundle_containers(const pcmk_resource_t *bundle)
{
GList *containers = NULL;
const pe__bundle_variant_data_t *data = NULL;
@@ -1927,14 +2124,14 @@ pe__bundle_containers(const pe_resource_t *bundle)
return containers;
}
-// Bundle implementation of resource_object_functions_t:active_node()
-pe_node_t *
-pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Bundle implementation of pcmk_rsc_methods_t:active_node()
+pcmk_node_t *
+pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
- pe_node_t *node = NULL;
- pe_resource_t *container = NULL;
+ pcmk_node_t *active = NULL;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *container = NULL;
GList *containers = NULL;
GList *iter = NULL;
GHashTable *nodes = NULL;
@@ -2002,3 +2199,21 @@ done:
g_hash_table_destroy(nodes);
return active;
}
+
+/*!
+ * \internal
+ * \brief Get maximum bundle resource instances per node
+ *
+ * \param[in] rsc Bundle resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__bundle_max_per_node(const pcmk_resource_t *rsc)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
+ return (unsigned int) bundle_data->nreplicas_per_host;
+}
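Taken together, the accessors added above can be used roughly as follows (illustrative sketch, not part of the patch; example_log_bundle_limits is a hypothetical caller and crm_trace is the existing logging macro):

static void
example_log_bundle_limits(const pcmk_resource_t *bundle)
{
    const pcmk_resource_t *child = pe__bundled_resource(bundle);
    unsigned int per_node = pe__bundle_max_per_node(bundle);

    crm_trace("Bundle %s (inner resource %s) allows %u replicas per node",
              bundle->id, ((child == NULL)? "none" : child->id), per_node);
}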
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index e411f98..a92a4b7 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -18,13 +18,14 @@
#include <crm/msg_xml.h>
#include <crm/common/output.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#ifdef PCMK__COMPAT_2_0
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s"
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED_LEGACY "s"
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED_LEGACY "s"
#else
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED
#endif
typedef struct clone_variant_data_s {
@@ -36,7 +37,7 @@ typedef struct clone_variant_data_s {
int total_clones;
- uint32_t flags; // Group of enum pe__clone_flags
+ uint32_t flags; // Group of enum pcmk__clone_flags
notify_data_t *stop_notify;
notify_data_t *start_notify;
@@ -46,8 +47,8 @@ typedef struct clone_variant_data_s {
xmlNode *xml_obj_child;
} clone_variant_data_t;
-#define get_clone_variant_data(data, rsc) \
- CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_clone)); \
+#define get_clone_variant_data(data, rsc) \
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_clone)); \
data = (clone_variant_data_t *) rsc->variant_opaque;
/*!
@@ -59,7 +60,7 @@ typedef struct clone_variant_data_s {
* \return Maximum instances for \p clone
*/
int
-pe__clone_max(const pe_resource_t *clone)
+pe__clone_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -76,7 +77,7 @@ pe__clone_max(const pe_resource_t *clone)
* \return Maximum allowed instances per node for \p clone
*/
int
-pe__clone_node_max(const pe_resource_t *clone)
+pe__clone_node_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -93,7 +94,7 @@ pe__clone_node_max(const pe_resource_t *clone)
* \return Maximum promoted instances for \p clone
*/
int
-pe__clone_promoted_max(const pe_resource_t *clone)
+pe__clone_promoted_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -110,7 +111,7 @@ pe__clone_promoted_max(const pe_resource_t *clone)
 * \return Maximum promoted instances per node for \p clone
*/
int
-pe__clone_promoted_node_max(const pe_resource_t *clone)
+pe__clone_promoted_node_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -167,16 +168,16 @@ node_list_to_str(const GList *list)
}
static void
-clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+clone_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
clone_variant_data_t *clone_data, const char *desc)
{
GString *attrs = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_separated_word(&attrs, 64, "promotable", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
pcmk__add_separated_word(&attrs, 64, "unique", ", ");
}
@@ -184,10 +185,10 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -206,8 +207,8 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
void
-pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set)
+pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
@@ -218,15 +219,15 @@ pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
- g_list_length(data_set->nodes));
+ g_list_length(scheduler->nodes));
}
}
-pe_resource_t *
-find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
+pcmk_resource_t *
+find_clone_instance(const pcmk_resource_t *rsc, const char *sub_id)
{
char *child_id = NULL;
- pe_resource_t *child = NULL;
+ pcmk_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -240,13 +241,13 @@ find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
return child;
}
-pe_resource_t *
-pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
+pcmk_resource_t *
+pe__create_clone_child(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
- pe_resource_t *child_rsc = NULL;
+ pcmk_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -268,7 +269,7 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (pe__unpack_resource(child_copy, &child_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
@@ -278,10 +279,10 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
- pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
+ pe__set_resource_flags_recursive(child_rsc, pcmk_rsc_removed);
}
- add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
+ add_hash_param(child_rsc->meta, PCMK_META_CLONE_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
@@ -291,90 +292,89 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
return child_rsc;
}
+/*!
+ * \internal
+ * \brief Unpack a nonnegative integer value from a resource meta-attribute
+ *
+ * \param[in] rsc Resource with meta-attribute
+ * \param[in] meta_name Name of meta-attribute to unpack
+ * \param[in] deprecated_name If not NULL, try unpacking this
+ * if \p meta_name is unset
+ * \param[in] default_value Value to use if unset
+ *
+ * \return Integer parsed from resource's specified meta-attribute if a valid
+ * nonnegative integer, \p default_value if unset, or 0 if invalid
+ */
+static int
+unpack_meta_int(const pcmk_resource_t *rsc, const char *meta_name,
+ const char *deprecated_name, int default_value)
+{
+ int integer = default_value;
+ const char *value = g_hash_table_lookup(rsc->meta, meta_name);
+
+ if ((value == NULL) && (deprecated_name != NULL)) {
+ value = g_hash_table_lookup(rsc->meta, deprecated_name);
+ }
+ if (value != NULL) {
+ pcmk__scan_min_int(value, &integer, 0);
+ }
+ return integer;
+}
+
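A hedged worked example of the helper's contract, written as a comment and based strictly on the doc block above (the attribute values shown are hypothetical):

/* Given rsc->meta where PCMK_META_PROMOTED_MAX ("promoted-max") is "2" and
 * PCMK_META_CLONE_NODE_MAX ("clone-node-max") is unset:
 *
 *   unpack_meta_int(rsc, PCMK_META_PROMOTED_MAX,
 *                   PCMK_XA_PROMOTED_MAX_LEGACY, 1);         // -> 2 (parsed)
 *   unpack_meta_int(rsc, PCMK_META_CLONE_NODE_MAX, NULL, 1); // -> 1 (default)
 *
 * A value that is not a nonnegative integer (for example "two") yields 0.
 */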
gboolean
-clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
- const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
- const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
-
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- const char *promoted_max = NULL;
- const char *promoted_node_max = NULL;
-
- promoted_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_MAX);
- if (promoted_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_max = g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_MAX_LEGACY);
- }
-
- promoted_node_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_NODEMAX);
- if (promoted_node_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_node_max =
- g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
- }
-
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
// Use 1 as default but 0 for minimum and invalid
- if (promoted_max == NULL) {
- clone_data->promoted_max = 1;
- } else {
- pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_max = unpack_meta_int(rsc, PCMK_META_PROMOTED_MAX,
+ PCMK_XA_PROMOTED_MAX_LEGACY,
+ 1);
// Use 1 as default but 0 for minimum and invalid
- if (promoted_node_max == NULL) {
- clone_data->promoted_node_max = 1;
- } else {
- pcmk__scan_min_int(promoted_node_max,
- &(clone_data->promoted_node_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_NODE_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_node_max =
+ unpack_meta_int(rsc, PCMK_META_PROMOTED_NODE_MAX,
+ PCMK_XA_PROMOTED_NODE_MAX_LEGACY, 1);
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
// Use 1 as default but 0 for minimum and invalid
- if (max_clones_node == NULL) {
- clone_data->clone_node_max = 1;
- } else {
- pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
- }
+ clone_data->clone_node_max = unpack_meta_int(rsc, PCMK_META_CLONE_NODE_MAX,
+ NULL, 1);
/* Use number of nodes (but always at least 1, which is handy for crm_verify
* for a CIB without nodes) as default, but 0 for minimum and invalid
*/
- if (max_clones == NULL) {
- clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
- } else {
- pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
- }
+ clone_data->clone_max = unpack_meta_int(rsc, PCMK_META_CLONE_MAX, NULL,
+ QB_MAX(1, g_list_length(scheduler->nodes)));
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) {
clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
"Clone", rsc->id,
clone_data->flags,
- pe__clone_ordered,
- "pe__clone_ordered");
+ pcmk__clone_ordered,
+ "pcmk__clone_ordered");
}
- if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
- pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && (clone_data->clone_node_max > 1)) {
+
+ pcmk__config_err("Ignoring " PCMK_META_CLONE_NODE_MAX " of %d for %s "
"because anonymous clones support only one instance "
- "per node", rsc->id);
+ "per node", clone_data->clone_node_max, rsc->id);
clone_data->clone_node_max = 1;
}
@@ -382,9 +382,9 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
pe_rsc_trace(rsc, "\tClone is promotable: %s",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
// Clones may contain a single group or primitive
for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
@@ -415,20 +415,20 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
if (clone_data->clone_max <= 0) {
 /* Create one child instance so that unpack_find_resource() will hook
* any orphans up to the parent correctly.
*/
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
}
@@ -439,12 +439,12 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-clone_active(pe_resource_t * rsc, gboolean all)
+clone_active(pcmk_resource_t * rsc, gboolean all)
{
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
@@ -492,27 +492,29 @@ short_print(const char *list, const char *prefix, const char *type,
}
static const char *
-configured_role_str(pe_resource_t * rsc)
+configured_role_str(pcmk_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
- target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
+ pcmk_resource_t *instance = rsc->children->data; // Any instance will do
+
+ target_role = g_hash_table_lookup(instance->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
-configured_role(pe_resource_t * rsc)
+configured_role(pcmk_resource_t *rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
/*!
@@ -520,7 +522,7 @@ configured_role(pe_resource_t * rsc)
* \deprecated This function will be removed in a future release
*/
static void
-clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
@@ -530,19 +532,20 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%s<clone ", pre_text);
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("multi_state=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -552,7 +555,7 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
}
bool
-is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
+is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any)
{
GList *gIter;
bool all = !any;
@@ -587,7 +590,7 @@ is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
* \deprecated This function will be removed in a future release
*/
void
-clone_print(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GString *list_text = NULL;
@@ -616,9 +619,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
- pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_promotable)? " (promotable)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n<ul>\n");
@@ -629,16 +632,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -652,15 +656,15 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(options, pe_print_clone_active)) {
pcmk__add_word(&stopped_list, 1024, child_rsc->id);
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -668,8 +672,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -678,7 +683,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -709,7 +714,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
@@ -725,17 +730,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
short_print((const char *) list_text->str, child_text,
UNPROMOTED_INSTANCES " (target-role)", NULL,
options, print_data);
@@ -756,11 +761,11 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_STOPPED) {
+ if (role == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -780,7 +785,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
pcmk__add_word(&stopped_list, 1024, node->details->uname);
@@ -809,12 +814,13 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -838,7 +844,7 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
all = g_list_prepend(all, (gpointer) "*");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
@@ -852,16 +858,18 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
printed_header = TRUE;
desc = pe__resource_description(rsc, show_opts);
-
rc = pe__name_and_nvpairs_xml(out, true, "clone", 10,
"id", rsc->id,
- "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "multi_state",
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
"disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored",
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"target_role", configured_role_str(rsc),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
@@ -879,12 +887,13 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -916,7 +925,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
@@ -931,9 +940,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -947,7 +957,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(show_opts, pcmk_show_clone_detail)
&& pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
if (stopped == NULL) {
@@ -956,9 +966,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped"));
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -966,8 +976,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -976,7 +987,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -1014,7 +1025,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1037,7 +1048,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1052,10 +1063,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if ((list_text != NULL) && (list_text->len > 0)) {
clone_header(out, &rc, rsc, clone_data, desc);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
out->list_item(out, NULL,
UNPROMOTED_INSTANCES " (target-role): [ %s ]",
(const char *) list_text->str);
@@ -1075,7 +1086,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -1096,7 +1107,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(node->details->uname, only_node,
@@ -1104,7 +1115,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname);
const char *state = "Stopped";
- if (configured_role(rsc) == RSC_ROLE_STOPPED) {
+ if (configured_role(rsc) == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
@@ -1166,7 +1177,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
void
-clone_free(pe_resource_t * rsc)
+clone_free(pcmk_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
@@ -1175,7 +1186,7 @@ clone_free(pe_resource_t * rsc)
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -1200,13 +1211,13 @@ clone_free(pe_resource_t * rsc)
}
enum rsc_role_e
-clone_resource_state(const pe_resource_t * rsc, gboolean current)
+clone_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e clone_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
@@ -1222,17 +1233,17 @@ clone_resource_state(const pe_resource_t * rsc, gboolean current)
* \internal
* \brief Check whether a clone has an instance for every node
*
- * \param[in] rsc Clone to check
- * \param[in] data_set Cluster state
+ * \param[in] rsc Clone to check
+ * \param[in] scheduler Scheduler data
*/
bool
-pe__is_universal_clone(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__is_universal_clone(const pcmk_resource_t *rsc,
+ const pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
- if (clone_data->clone_max == g_list_length(data_set->nodes)) {
+ if (clone_data->clone_max == g_list_length(scheduler->nodes)) {
return TRUE;
}
}
@@ -1240,7 +1251,7 @@ pe__is_universal_clone(const pe_resource_t *rsc,
}
gboolean
-pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1256,9 +1267,9 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = NULL;
+ const pcmk_resource_t *child_rsc = NULL;
- child_rsc = (const pe_resource_t *) iter->data;
+ child_rsc = (const pcmk_resource_t *) iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
@@ -1270,7 +1281,7 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
}
const char *
-pe__clone_child_id(const pe_resource_t *rsc)
+pe__clone_child_id(const pcmk_resource_t *rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
@@ -1286,12 +1297,12 @@ pe__clone_child_id(const pe_resource_t *rsc)
* \return true if clone is ordered, otherwise false
*/
bool
-pe__clone_is_ordered(const pe_resource_t *clone)
+pe__clone_is_ordered(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
- return pcmk_is_set(clone_data->flags, pe__clone_ordered);
+ return pcmk_is_set(clone_data->flags, pcmk__clone_ordered);
}
/*!
@@ -1305,7 +1316,7 @@ pe__clone_is_ordered(const pe_resource_t *clone)
* already set or pcmk_rc_already if it was)
*/
int
-pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
+pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag)
{
clone_variant_data_t *clone_data = NULL;
@@ -1321,6 +1332,26 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
/*!
* \internal
+ * \brief Check whether a clone flag is set
+ *
+ * \param[in] clone Clone resource to check
+ * \param[in] flags Flag or flags to check
+ *
+ * \return \c true if all \p flags are set for \p clone, otherwise \c false
+ */
+bool
+pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+ CRM_ASSERT(clone_data != NULL);
+
+ return pcmk_all_flags_set(clone_data->flags, flags);
+}
+
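Illustrative use of the new flag query (hypothetical caller; pcmk__clone_ordered is the flag set during clone_unpack() above):

    if (pe__clone_flag_is_set(clone, pcmk__clone_ordered)) {
        pe_rsc_trace(clone, "Instances of %s must be started and stopped "
                     "sequentially", clone->id);
    }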
+/*!
+ * \internal
* \brief Create pseudo-actions needed for promotable clones
*
* \param[in,out] clone Promotable clone to create actions for
@@ -1328,63 +1359,59 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
* \param[in] any_demoting Whether any instance will be demoted
*/
void
-pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
+pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting,
bool any_demoting)
{
- pe_action_t *action = NULL;
- pe_action_t *action_complete = NULL;
+ pcmk_action_t *action = NULL;
+ pcmk_action_t *action_complete = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
// Create a "promote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting,
- true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTE,
+ !any_promoting, true);
// Create a "promoted" action for when all promotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTED,
!any_promoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for promotion
if (clone_data->promote_notify == NULL) {
clone_data->promote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_PROMOTE,
+ PCMK_ACTION_PROMOTE,
action,
action_complete);
}
// Create a "demote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTE,
+ !any_demoting, true);
// Create a "demoted" action for when all demotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTED,
!any_demoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for demotion
if (clone_data->demote_notify == NULL) {
clone_data->demote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_DEMOTE,
+ PCMK_ACTION_DEMOTE,
action,
action_complete);
if (clone_data->promote_notify != NULL) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->start_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->start_notify->pre,
- pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->stop_notify->pre,
- pe_order_optional);
+ clone_data->stop_notify->pre, pcmk__ar_ordered);
}
}
}
@@ -1396,7 +1423,7 @@ pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
* \param[in,out] clone Clone to create notifications for
*/
void
-pe__create_clone_notifications(pe_resource_t *clone)
+pe__create_clone_notifications(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1415,7 +1442,7 @@ pe__create_clone_notifications(pe_resource_t *clone)
* \param[in,out] clone Clone to free notification data for
*/
void
-pe__free_clone_notification_data(pe_resource_t *clone)
+pe__free_clone_notification_data(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1445,26 +1472,45 @@ pe__free_clone_notification_data(pe_resource_t *clone)
* \param[in,out] stopped Stopped action for \p clone
*/
void
-pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
- pe_action_t *start, pe_action_t *started,
- pe_action_t *stop, pe_action_t *stopped)
+pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
+ pcmk_action_t *start, pcmk_action_t *started,
+ pcmk_action_t *stop, pcmk_action_t *stopped)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
if (clone_data->start_notify == NULL) {
- clone_data->start_notify = pe__action_notif_pseudo_ops(clone, RSC_START,
+ clone_data->start_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_START,
start, started);
}
if (clone_data->stop_notify == NULL) {
- clone_data->stop_notify = pe__action_notif_pseudo_ops(clone, RSC_STOP,
+ clone_data->stop_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_STOP,
stop, stopped);
if ((clone_data->start_notify != NULL)
&& (clone_data->stop_notify != NULL)) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->start_notify->pre, pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
}
}
}
+
+/*!
+ * \internal
+ * \brief Get maximum clone resource instances per node
+ *
+ * \param[in] rsc Clone resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__clone_max_per_node(const pcmk_resource_t *rsc)
+{
+ const clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+ return clone_data->clone_node_max;
+}
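
Illustration (editorial): the new accessor exposes the clone-node-max limit without callers reaching into clone_variant_data_t. A minimal sketch, assuming the declaration is visible via <crm/pengine/internal.h>:

    #include <crm/pengine/internal.h>  // assumed to declare pe__clone_max_per_node()

    // Sketch: trace how many instances of a clone may be active on one node
    static void
    trace_clone_limit(const pcmk_resource_t *rsc)
    {
        if (pe_rsc_is_clone(rsc)) {
            crm_trace("At most %u instance(s) of %s may be active per node",
                      pe__clone_max_per_node(rsc), rsc->id);
        }
    }
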
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index 6c69bfc..0fdd5a1 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,6 +15,7 @@
#include <glib.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
gboolean was_processing_error = FALSE;
@@ -104,7 +105,7 @@ static pcmk__cluster_option_t pe_opts[] = {
},
{
"stonith-action", NULL, "select", "reboot, off, poweroff",
- "reboot", pcmk__is_fencing_action,
+ PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
N_("Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")"),
NULL
@@ -157,7 +158,17 @@ static pcmk__cluster_option_t pe_opts[] = {
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled.")
},
-
+ {
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("How long to wait for a node that has joined the cluster to join "
+ "the controller process group"),
+ N_("Fence nodes that do not join the controller process group within "
+ "this much time after joining the cluster, to allow the cluster "
+ "to continue managing resources. A value of 0 means never fence "
+ "pending nodes. Setting the value to 2h means fence nodes after "
+ "2 hours.")
+ },
{
"cluster-delay", NULL, "time", NULL,
"60s", pcmk__valid_interval_spec,
@@ -311,34 +322,34 @@ fail2text(enum action_fail_response fail)
const char *result = "<unknown>";
switch (fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
result = "ignore";
break;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
result = "demote";
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
result = "block";
break;
- case action_fail_recover:
+ case pcmk_on_fail_restart:
result = "recover";
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
result = "migrate";
break;
- case action_fail_stop:
+ case pcmk_on_fail_stop:
result = "stop";
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
result = "fence";
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
result = "standby";
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
result = "restart-container";
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
result = "reset-remote";
break;
}
@@ -348,49 +359,46 @@ fail2text(enum action_fail_response fail)
enum action_tasks
text2task(const char *task)
{
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- return stop_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
- return stopped_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
- return start_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
- return started_rsc;
- } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
- return shutdown_crm;
- } else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
- return stonith_node;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return monitor_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
- return action_notify;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
- return action_notified;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- return action_promote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
- return action_demote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
- return action_promoted;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
- return action_demoted;
- }
-#if SUPPORT_TRACING
- if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
- return no_action;
- }
- crm_trace("Unsupported action: %s", task);
-#endif
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ return pcmk_action_stop;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) {
+ return pcmk_action_stopped;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) {
+ return pcmk_action_start;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) {
+ return pcmk_action_started;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) {
+ return pcmk_action_shutdown;
- return no_action;
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
+ return pcmk_action_fence;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
+ return pcmk_action_monitor;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
+ return pcmk_action_notify;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) {
+ return pcmk_action_notified;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
+ return pcmk_action_promote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
+ return pcmk_action_demote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) {
+ return pcmk_action_promoted;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) {
+ return pcmk_action_demoted;
+ }
+ return pcmk_action_unspecified;
}
const char *
@@ -399,47 +407,47 @@ task2text(enum action_tasks task)
const char *result = "<unknown>";
switch (task) {
- case no_action:
+ case pcmk_action_unspecified:
result = "no_action";
break;
- case stop_rsc:
- result = CRMD_ACTION_STOP;
+ case pcmk_action_stop:
+ result = PCMK_ACTION_STOP;
break;
- case stopped_rsc:
- result = CRMD_ACTION_STOPPED;
+ case pcmk_action_stopped:
+ result = PCMK_ACTION_STOPPED;
break;
- case start_rsc:
- result = CRMD_ACTION_START;
+ case pcmk_action_start:
+ result = PCMK_ACTION_START;
break;
- case started_rsc:
- result = CRMD_ACTION_STARTED;
+ case pcmk_action_started:
+ result = PCMK_ACTION_RUNNING;
break;
- case shutdown_crm:
- result = CRM_OP_SHUTDOWN;
+ case pcmk_action_shutdown:
+ result = PCMK_ACTION_DO_SHUTDOWN;
break;
- case stonith_node:
- result = CRM_OP_FENCE;
+ case pcmk_action_fence:
+ result = PCMK_ACTION_STONITH;
break;
- case monitor_rsc:
- result = CRMD_ACTION_STATUS;
+ case pcmk_action_monitor:
+ result = PCMK_ACTION_MONITOR;
break;
- case action_notify:
- result = CRMD_ACTION_NOTIFY;
+ case pcmk_action_notify:
+ result = PCMK_ACTION_NOTIFY;
break;
- case action_notified:
- result = CRMD_ACTION_NOTIFIED;
+ case pcmk_action_notified:
+ result = PCMK_ACTION_NOTIFIED;
break;
- case action_promote:
- result = CRMD_ACTION_PROMOTE;
+ case pcmk_action_promote:
+ result = PCMK_ACTION_PROMOTE;
break;
- case action_promoted:
- result = CRMD_ACTION_PROMOTED;
+ case pcmk_action_promoted:
+ result = PCMK_ACTION_PROMOTED;
break;
- case action_demote:
- result = CRMD_ACTION_DEMOTE;
+ case pcmk_action_demote:
+ result = PCMK_ACTION_DEMOTE;
break;
- case action_demoted:
- result = CRMD_ACTION_DEMOTED;
+ case pcmk_action_demoted:
+ result = PCMK_ACTION_DEMOTED;
break;
}
@@ -450,50 +458,50 @@ const char *
role2text(enum rsc_role_e role)
{
switch (role) {
- case RSC_ROLE_UNKNOWN:
- return RSC_ROLE_UNKNOWN_S;
- case RSC_ROLE_STOPPED:
- return RSC_ROLE_STOPPED_S;
- case RSC_ROLE_STARTED:
- return RSC_ROLE_STARTED_S;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_stopped:
+ return PCMK__ROLE_STOPPED;
+
+ case pcmk_role_started:
+ return PCMK__ROLE_STARTED;
+
+ case pcmk_role_unpromoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_UNPROMOTED_LEGACY_S;
+ return PCMK__ROLE_UNPROMOTED_LEGACY;
#else
- return RSC_ROLE_UNPROMOTED_S;
+ return PCMK__ROLE_UNPROMOTED;
#endif
- case RSC_ROLE_PROMOTED:
+
+ case pcmk_role_promoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_PROMOTED_LEGACY_S;
+ return PCMK__ROLE_PROMOTED_LEGACY;
#else
- return RSC_ROLE_PROMOTED_S;
+ return PCMK__ROLE_PROMOTED;
#endif
+
+ default: // pcmk_role_unknown
+ return PCMK__ROLE_UNKNOWN;
}
- CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
- CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
- // coverity[dead_error_line]
- return RSC_ROLE_UNKNOWN_S;
}
enum rsc_role_e
text2role(const char *role)
{
CRM_ASSERT(role != NULL);
- if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
- return RSC_ROLE_STOPPED;
- } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
- return RSC_ROLE_STARTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_UNPROMOTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_PROMOTED;
- } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
- return RSC_ROLE_UNKNOWN;
+ if (pcmk__str_eq(role, PCMK__ROLE_STOPPED, pcmk__str_casei)) {
+ return pcmk_role_stopped;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_STARTED, pcmk__str_casei)) {
+ return pcmk_role_started;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ return pcmk_role_unpromoted;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
+ return pcmk_role_promoted;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN, pcmk__str_casei)) {
+ return pcmk_role_unknown;
}
crm_err("Unknown role: %s", role);
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
void
@@ -514,48 +522,103 @@ add_hash_param(GHashTable * hash, const char *name, const char *value)
}
}
+/*!
+ * \internal
+ * \brief Look up an attribute value on the appropriate node
+ *
+ * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
+ * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
+ * attribute on the node's host. Otherwise, query the attribute on \p node
+ * itself.
+ *
+ * \param[in] node Node to query attribute value on by default
+ * \param[in] name Name of attribute to query
+ * \param[in] rsc Resource on whose behalf we're querying
+ * \param[in] node_type Type of resource location lookup
+ * \param[in] force_host Force a lookup on the guest node's host, regardless of
+ * the \c XML_RSC_ATTR_TARGET value
+ *
+ * \return Value of the attribute on \p node or on the host of \p node
+ *
+ * \note If \p force_host is \c true, \p node \e must be a guest node.
+ */
const char *
-pe_node_attribute_calculated(const pe_node_t *node, const char *name,
- const pe_resource_t *rsc)
+pe__node_attribute_calculated(const pcmk_node_t *node, const char *name,
+ const pcmk_resource_t *rsc,
+ enum pcmk__rsc_node node_type,
+ bool force_host)
{
- const char *source;
-
- if(node == NULL) {
- return NULL;
+ // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
+ bool is_guest = (node != NULL)
+ && (node->details->type == pcmk_node_variant_remote)
+ && (node->details->remote_rsc != NULL)
+ && (node->details->remote_rsc->container != NULL);
+ const char *source = NULL;
+ const char *node_type_s = NULL;
+ const char *reason = NULL;
+
+ const pcmk_resource_t *container = NULL;
+ const pcmk_node_t *host = NULL;
+
+ CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
+ && (!force_host || is_guest));
+
+ /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a
+ * user configuration error.
+ */
+ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+ if (!force_host
+ && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
- } else if(rsc == NULL) {
return g_hash_table_lookup(node->details->attrs, name);
}
- source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
- if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
- return g_hash_table_lookup(node->details->attrs, name);
- }
+ container = node->details->remote_rsc->container;
- /* Use attributes set for the containers location
- * instead of for the container itself
- *
- * Useful when the container is using the host's local
- * storage
- */
+ switch (node_type) {
+ case pcmk__rsc_node_assigned:
+ node_type_s = "assigned";
+ host = container->allocated_to;
+ if (host == NULL) {
+ reason = "not assigned";
+ }
+ break;
- CRM_ASSERT(node->details->remote_rsc);
- CRM_ASSERT(node->details->remote_rsc->container);
+ case pcmk__rsc_node_current:
+ node_type_s = "current";
- if(node->details->remote_rsc->container->running_on) {
- pe_node_t *host = node->details->remote_rsc->container->running_on->data;
- pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s",
- rsc->id, name, pe__node_name(host));
- return g_hash_table_lookup(host->details->attrs, name);
+ if (container->running_on != NULL) {
+ host = container->running_on->data;
+ }
+ if (host == NULL) {
+ reason = "inactive";
+ }
+ break;
+
+ default:
+ // Add support for other enum pcmk__rsc_node values if needed
+ CRM_ASSERT(false);
+ break;
}
- pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive",
- rsc->id, name, node->details->remote_rsc->container->id);
+ if (host != NULL) {
+ const char *value = g_hash_table_lookup(host->details->attrs, name);
+
+ pe_rsc_trace(rsc,
+ "%s: Value lookup for %s on %s container host %s %s%s",
+ rsc->id, name, node_type_s, pe__node_name(host),
+ ((value != NULL)? "succeeded: " : "failed"),
+ pcmk__s(value, ""));
+ return value;
+ }
+ pe_rsc_trace(rsc,
+ "%s: Not looking for %s on %s container host: %s is %s",
+ rsc->id, name, node_type_s, container->id, reason);
return NULL;
}
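
Illustration (editorial): the rewritten lookup now selects between the guest node's assigned host and its current host via enum pcmk__rsc_node, and can be forced to the host with force_host. A minimal sketch of the new calling convention; the attribute name "my-node-attr" and the wrapper are hypothetical:

    // Sketch: look up an attribute, following a guest node to its current host
    // when rsc sets XML_RSC_ATTR_TARGET="host"
    static const char *
    lookup_attr_for_rsc(const pcmk_node_t *node, const pcmk_resource_t *rsc)
    {
        return pe__node_attribute_calculated(node, "my-node-attr", rsc,
                                             pcmk__rsc_node_current,
                                             false);
    }
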
const char *
-pe_node_attribute_raw(const pe_node_t *node, const char *name)
+pe_node_attribute_raw(const pcmk_node_t *node, const char *name)
{
if(node == NULL) {
return NULL;
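
Illustration (editorial): despite the renames above, the text/enum mappings still round-trip. A minimal sanity sketch using only constants referenced in this file; the include is assumed to pull in those constants:

    #include <crm/pengine/internal.h>  // assumed to provide the constants below

    // Sketch: round-trip checks for the renamed action and role mappings
    static void
    check_mappings(void)
    {
        CRM_ASSERT(text2task(PCMK_ACTION_STOP) == pcmk_action_stop);
        CRM_ASSERT(pcmk__str_eq(task2text(pcmk_action_stop), PCMK_ACTION_STOP,
                                pcmk__str_none));
        CRM_ASSERT(text2role(PCMK__ROLE_STOPPED) == pcmk_role_stopped);
    }
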
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index f168124..0ab2e04 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -13,15 +13,17 @@
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
-static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
- unsigned int *count_clean);
+static pcmk_node_t *active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
-resource_object_functions_t resource_class_functions[] = {
+pcmk_rsc_methods_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
@@ -34,6 +36,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__native_is_filtered,
active_node,
+ pe__primitive_max_per_node,
},
{
group_unpack,
@@ -47,6 +50,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__group_is_filtered,
active_node,
+ pe__group_max_per_node,
},
{
clone_unpack,
@@ -60,6 +64,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__clone_is_filtered,
active_node,
+ pe__clone_max_per_node,
},
{
pe__unpack_bundle,
@@ -73,6 +78,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
+ pe__bundle_max_per_node,
}
};
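
Illustration (editorial): each variant now contributes a max-per-node method to this table, so callers can dispatch through rsc->fns. The member name max_per_node is an assumption here, since this hunk shows only the table entries:

    // Sketch (assumed member name): per-variant instance limit via the method table
    static unsigned int
    instance_limit(const pcmk_resource_t *rsc)
    {
        return rsc->fns->max_per_node(rsc);  // e.g. pe__clone_max_per_node for clones
    }
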
@@ -80,23 +86,23 @@ static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
- return pe_native;
+ return pcmk_rsc_variant_primitive;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
- return pe_group;
+ return pcmk_rsc_variant_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
- return pe_container;
+ return pcmk_rsc_variant_bundle;
}
- return pe_unknown;
+ return pcmk_rsc_variant_unknown;
}
static void
@@ -106,10 +112,12 @@ dup_attr(gpointer key, gpointer value, gpointer user_data)
}
static void
-expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
+expand_parents_fixed_nvpairs(pcmk_resource_t *rsc,
+ pe_rule_eval_data_t *rule_data,
+ GHashTable *meta_hash, pcmk_scheduler_t *scheduler)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
- pe_resource_t *p = rsc->parent;
+ pcmk_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
@@ -119,8 +127,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
/* The fixed value of the lower parent resource takes precedence and is not overwritten. */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
- pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
- rule_data, parent_orig_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS, rule_data,
+ parent_orig_meta, NULL, FALSE, scheduler);
p = p->parent;
}
@@ -146,8 +154,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
}
void
-get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
- pe_node_t * node, pe_working_set_t * data_set)
+get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
@@ -157,8 +165,8 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
@@ -170,23 +178,23 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
- const char *prop_value = crm_element_value(rsc->xml, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(a);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
/* If it is already explicitly set as a child, it will not be overwritten. */
if (rsc->parent != NULL) {
- expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
+ expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler);
}
/* check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_META_SETS,
+ &rule_data, meta_hash, NULL, FALSE, scheduler);
/* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
/* The values already set up to this point will not be overwritten. */
@@ -196,13 +204,13 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
}
void
-get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -213,16 +221,17 @@ get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
- get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
+ get_rsc_attributes(meta_hash, rsc->parent, node, scheduler);
} else {
/* and finally check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_ATTR_SETS,
+ &rule_data, meta_hash, NULL, FALSE,
+ scheduler);
}
}
@@ -234,9 +243,9 @@ template_op_key(xmlNode * op)
char *key = NULL;
if ((role == NULL)
- || pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- role = RSC_ROLE_UNKNOWN_S;
+ || pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
@@ -244,7 +253,8 @@ template_op_key(xmlNode * op)
}
static gboolean
-unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
+unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
@@ -268,7 +278,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -277,7 +287,8 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
return FALSE;
}
- cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
+ cib_resources = get_xpath_object("//" XML_CIB_TAG_RESOURCES,
+ scheduler->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
@@ -292,7 +303,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
- crm_xml_replace(new_xml, XML_ATTR_ID, id);
+ crm_xml_add(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
@@ -346,19 +357,19 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
- /* Disable multi-level templates for now */
- /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
+#if 0 /* Disable multi-level templates for now */
+ if (!unpack_template(new_xml, expanded_xml, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
-
return FALSE;
- } */
+ }
+#endif
return TRUE;
}
static gboolean
-add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
+add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *template_ref = NULL;
const char *id = NULL;
@@ -375,7 +386,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -384,7 +395,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
return FALSE;
}
- if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
+ if (add_tag_ref(scheduler->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
@@ -392,7 +403,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
}
static bool
-detect_promotable(pe_resource_t *rsc)
+detect_promotable(pcmk_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
@@ -402,8 +413,7 @@ detect_promotable(pe_resource_t *rsc)
}
// @COMPAT deprecated since 2.0.0
- if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
- pcmk__str_casei)) {
+ if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
@@ -423,18 +433,18 @@ free_params_table(gpointer data)
/*!
* \brief Get a table of resource parameters
*
- * \param[in,out] rsc Resource to query
- * \param[in] node Node for evaluating rules (NULL for defaults)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to query
+ * \param[in] node Node for evaluating rules (NULL for defaults)
+ * \param[in,out] scheduler Scheduler data
*
* \return Hash table containing resource parameter names and values
- * (or NULL if \p rsc or \p data_set is NULL)
+ * (or NULL if \p rsc or \p scheduler is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
-pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
GHashTable *params_on_node = NULL;
@@ -445,7 +455,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
const char *node_name = "";
// Sanity check
- if ((rsc == NULL) || (data_set == NULL)) {
+ if ((rsc == NULL) || (scheduler == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
@@ -462,7 +472,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
- get_rsc_attributes(params_on_node, rsc, node, data_set);
+ get_rsc_attributes(params_on_node, rsc, node, scheduler);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
@@ -478,29 +488,30 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
* \param[in] is_default Whether \p value was selected by default
*/
static void
-unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
+unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing);
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
- } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
@@ -508,27 +519,29 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
return;
} else {
- pe__set_resource_flags(rsc,
- pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing
+ |pcmk_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
- } else if ((rsc->variant == pe_native)
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
value = PCMK__VALUE_FENCING;
- } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
+ } else if (rsc->cluster->no_quorum_policy == pcmk_no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
@@ -550,18 +563,18 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
#ifndef PCMK__COMPAT_2_0
static void
-warn_about_deprecated_classes(pe_resource_t *rsc)
+warn_about_deprecated_classes(pcmk_resource_t *rsc)
{
const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
- pe_warn_once(pe_wo_upstart,
+ pe_warn_once(pcmk__wo_upstart,
"Support for Upstart resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
- pe_warn_once(pe_wo_nagios,
+ pe_warn_once(pcmk__wo_nagios,
"Support for Nagios resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
@@ -574,12 +587,12 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
- * \c pe_resource_t object.
+ * \c pcmk_resource_t object.
*
- * \param[in] xml_obj XML node containing the resource's configuration
- * \param[out] rsc Where to store the unpacked resource information
- * \param[in] parent Resource's parent, if any
- * \param[in,out] data_set Cluster working set
+ * \param[in] xml_obj XML node containing the resource's configuration
+ * \param[out] rsc Where to store the unpacked resource information
+ * \param[in] parent Resource's parent, if any
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
@@ -587,8 +600,8 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
-pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set)
+pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent, pcmk_scheduler_t *scheduler)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
@@ -599,7 +612,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
@@ -607,31 +620,31 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
};
CRM_CHECK(rsc != NULL, return EINVAL);
- CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
+ CRM_CHECK((xml_obj != NULL) && (scheduler != NULL),
*rsc = NULL;
return EINVAL);
- rule_data.now = data_set->now;
+ rule_data.now = scheduler->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
- if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
+ if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) {
return pcmk_rc_unpack_error;
}
- *rsc = calloc(1, sizeof(pe_resource_t));
+ *rsc = calloc(1, sizeof(pcmk_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
- (*rsc)->cluster = data_set;
+ (*rsc)->cluster = scheduler;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
@@ -648,12 +661,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
- (*rsc)->ops_xml = expand_idref(ops, data_set->input);
+ (*rsc)->ops_xml = expand_idref(ops, scheduler->input);
- (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
- if ((*rsc)->variant == pe_unknown) {
+ (*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
+ if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
- id, crm_element_name((*rsc)->xml));
+ id, (*rsc)->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -678,23 +691,23 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
- get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
- (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
+ get_meta_attributes((*rsc)->meta, *rsc, NULL, scheduler);
+ (*rsc)->parameters = pe_rsc_params(*rsc, NULL, scheduler); // \deprecated
(*rsc)->flags = 0;
- pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
+ pe__set_resource_flags(*rsc, pcmk_rsc_runnable|pcmk_rsc_unassigned);
- if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
- (*rsc)->role = RSC_ROLE_STOPPED;
- (*rsc)->next_role = RSC_ROLE_UNKNOWN;
+ (*rsc)->role = pcmk_role_stopped;
+ (*rsc)->next_role = pcmk_role_unknown;
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
@@ -704,12 +717,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_critical);
+ pe__set_resource_flags(*rsc, pcmk_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_notify);
+ pe__set_resource_flags(*rsc, pcmk_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
@@ -723,7 +736,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
@@ -732,38 +745,38 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
*/
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
} else {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
if (detect_promotable(*rsc)) {
- pe__set_resource_flags(*rsc, pe_rsc_promotable);
+ pe__set_resource_flags(*rsc, pcmk_rsc_promotable);
}
} else {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
@@ -771,7 +784,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
- pe_warn_once(pe_wo_restart_type,
+ pe_warn_once(pcmk__wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
@@ -782,17 +795,17 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_only;
+ (*rsc)->recovery_type = pcmk_multiply_active_stop;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_block;
+ (*rsc)->recovery_type = pcmk_multiply_active_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_unexpected;
+ (*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
@@ -803,7 +816,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
@@ -813,7 +826,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->stickiness = char2score(value);
}
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
@@ -821,8 +834,8 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
- pe_warn_once(pe_wo_neg_threshold,
- XML_RSC_ATTR_FAIL_STICKINESS
+ pe_warn_once(pcmk__wo_neg_threshold,
+ PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
@@ -830,21 +843,21 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
- pe__set_resource_flags(*rsc, pe_rsc_fence_device);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
+ pe__set_resource_flags(*rsc, pcmk_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
- GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
+ GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
@@ -865,34 +878,35 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
- (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
+ (*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
- if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
+ if ((*rsc)->fns->unpack(*rsc, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
- resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
+ resource_location(*rsc, NULL, 0, "symmetric_default", scheduler);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
- resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
+ resource_location(*rsc, NULL, 0, "remote_connection_default",
+ scheduler);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
- pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
+ pcmk_is_set((*rsc)->flags, pcmk_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
- (*rsc)->utilization, NULL, FALSE, data_set);
+ (*rsc)->utilization, NULL, FALSE, scheduler);
if (expanded_xml) {
- if (add_template_rsc(xml_obj, data_set) == FALSE) {
+ if (add_template_rsc(xml_obj, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -902,9 +916,9 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
}
gboolean
-is_parent(pe_resource_t *child, pe_resource_t *rsc)
+is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc)
{
- pe_resource_t *parent = child;
+ pcmk_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
@@ -918,15 +932,16 @@ is_parent(pe_resource_t *child, pe_resource_t *rsc)
return FALSE;
}
-pe_resource_t *
-uber_parent(pe_resource_t * rsc)
+pcmk_resource_t *
+uber_parent(pcmk_resource_t *rsc)
{
- pe_resource_t *parent = rsc;
+ pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
- while (parent->parent != NULL && parent->parent->variant != pe_container) {
+ while ((parent->parent != NULL)
+ && (parent->parent->variant != pcmk_rsc_variant_bundle)) {
parent = parent->parent;
}
return parent;
@@ -943,16 +958,17 @@ uber_parent(pe_resource_t * rsc)
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
-const pe_resource_t *
-pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
+const pcmk_resource_t *
+pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
{
- const pe_resource_t *parent = rsc;
+ const pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
- if (!include_bundle && (parent->parent->variant == pe_container)) {
+ if (!include_bundle
+ && (parent->parent->variant == pcmk_rsc_variant_bundle)) {
break;
}
parent = parent->parent;
@@ -961,7 +977,7 @@ pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
}
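
Illustration (editorial): the two helpers above differ only in how they treat bundles. A minimal sketch; the wrapper is hypothetical:

    // Sketch: for a resource inside a bundle, uber_parent() stops below the
    // bundle, while pe__const_top_resource(rsc, true) returns the bundle itself
    static void
    trace_ancestry(pcmk_resource_t *rsc)
    {
        crm_trace("%s: uber_parent is %s, topmost ancestor including bundle is %s",
                  rsc->id, uber_parent(rsc)->id,
                  pe__const_top_resource(rsc, true)->id);
    }
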
void
-common_free(pe_resource_t * rsc)
+common_free(pcmk_resource_t * rsc)
{
if (rsc == NULL) {
return;
@@ -984,7 +1000,9 @@ common_free(pe_resource_t * rsc)
g_hash_table_destroy(rsc->utilization);
}
- if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc->parent == NULL)
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
@@ -1037,8 +1055,8 @@ common_free(pe_resource_t * rsc)
* \return true if the count should continue, or false if sufficiently known
*/
bool
-pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
- pe_node_t **active, unsigned int *count_all,
+pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
@@ -1065,7 +1083,7 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
} else {
keep_looking = true;
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
@@ -1079,12 +1097,12 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
return keep_looking;
}
-// Shared implementation of resource_object_functions_t:active_node()
-static pe_node_t *
-active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Shared implementation of pcmk_rsc_methods_t:active_node()
+static pcmk_node_t *
+active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
+ pcmk_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
@@ -1096,7 +1114,7 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
+ if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
@@ -1117,8 +1135,8 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
-pe_node_t *
-pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
+pcmk_node_t *
+pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
@@ -1126,7 +1144,7 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
return NULL;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
@@ -1135,20 +1153,20 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
void
-pe__count_common(pe_resource_t *rsc)
+pe__count_common(pcmk_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
- ((pe_resource_t *) item->data)->fns->count(item->data);
+ ((pcmk_resource_t *) item->data)->fns->count(item->data);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
- || (rsc->role > RSC_ROLE_STOPPED)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
+ || (rsc->role > pcmk_role_stopped)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
rsc->cluster->blocked_resources++;
}
}
@@ -1163,7 +1181,7 @@ pe__count_common(pe_resource_t *rsc)
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
-pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
+pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
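
Illustration (editorial): pe_rsc_params() returns, and caches on the resource, a table of instance parameters evaluated for a given node. A minimal sketch; the parameter name "ip" is purely illustrative:

    #include <glib.h>
    #include <crm/pengine/internal.h>  // assumed to declare pe_rsc_params()

    // Sketch: fetch one evaluated instance parameter; the returned table is
    // owned by rsc->parameter_cache and must not be freed by the caller
    static const char *
    param_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node,
                  pcmk_scheduler_t *scheduler)
    {
        GHashTable *params = pe_rsc_params(rsc, node, scheduler);

        return (params == NULL)? NULL : g_hash_table_lookup(params, "ip");
    }
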
diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c
index a4a3e11..6990d3d 100644
--- a/lib/pengine/failcounts.c
+++ b/lib/pengine/failcounts.c
@@ -77,7 +77,8 @@ is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml,
}
static gboolean
-block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
+block_failure(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ const xmlNode *xml_op)
{
char *xml_name = clone_strip(rsc->id);
@@ -180,11 +181,11 @@ block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
* \note The caller is responsible for freeing the result.
*/
static inline char *
-rsc_fail_name(const pe_resource_t *rsc)
+rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? strdup(name) : clone_strip(name);
}
/*!
@@ -236,7 +237,6 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* \brief Compile regular expressions to match failure-related node attributes
*
* \param[in] rsc Resource being checked for failures
- * \param[in] data_set Data set (for CRM feature set version)
* \param[out] failcount_re Storage for regular expression for fail count
* \param[out] lastfailure_re Storage for regular expression for last failure
*
@@ -245,23 +245,25 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* regfree().
*/
static int
-generate_fail_regexes(const pe_resource_t *rsc,
- const pe_working_set_t *data_set,
+generate_fail_regexes(const pcmk_resource_t *rsc,
regex_t *failcount_re, regex_t *lastfailure_re)
{
+ int rc = pcmk_rc_ok;
char *rsc_name = rsc_fail_name(rsc);
- const char *version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ const char *version = crm_element_value(rsc->cluster->input,
+ XML_ATTR_CRM_VERSION);
+
+ // @COMPAT Pacemaker <= 1.1.16 used a single fail count per resource
gboolean is_legacy = (compare_version(version, "3.0.13") < 0);
- int rc = pcmk_rc_ok;
if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
failcount_re) != pcmk_rc_ok) {
rc = EINVAL;
} else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name,
is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
lastfailure_re) != pcmk_rc_ok) {
rc = EINVAL;
regfree(failcount_re);
@@ -271,68 +273,137 @@ generate_fail_regexes(const pe_resource_t *rsc,
return rc;
}
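
Illustration (editorial): the rewritten pe_get_failcount() below keeps the same external contract. A minimal usage sketch; the flag value pcmk__fc_default is assumed to be the usual "no special handling" choice and is not shown in this hunk:

    // Sketch: report a resource's fail count on a node
    static void
    trace_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc)
    {
        time_t last_failure = 0;
        int failcount = pe_get_failcount(node, rsc, &last_failure,
                                         pcmk__fc_default, NULL);  // assumed flag

        if (failcount > 0) {
            crm_trace("%s failed %d time(s) on %s, most recently at %lld",
                      rsc->id, failcount, pe__node_name(node),
                      (long long) last_failure);
        }
    }
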
-int
-pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
- time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+// Data for fail-count-related iterators
+struct failcount_data {
+ const pcmk_node_t *node; // Node to check for fail count
+ pcmk_resource_t *rsc; // Resource to check for fail count
+ uint32_t flags; // Fail count flags
+ const xmlNode *xml_op; // History entry for expiration purposes (or NULL)
+ regex_t failcount_re; // Fail count regular expression to match
+ regex_t lastfailure_re; // Last failure regular expression to match
+ int failcount; // Fail count so far
+ time_t last_failure; // Time of most recent failure so far
+};
+
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a node attribute
+ *
+ * \param[in] key Node attribute name
+ * \param[in] value Node attribute value
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_attr(gpointer key, gpointer value, gpointer user_data)
{
- char *key = NULL;
- const char *value = NULL;
- regex_t failcount_re, lastfailure_re;
- int failcount = 0;
- time_t last = 0;
- GHashTableIter iter;
-
- CRM_CHECK(generate_fail_regexes(rsc, rsc->cluster, &failcount_re,
- &lastfailure_re) == pcmk_rc_ok,
- return 0);
+ struct failcount_data *fc_data = user_data;
+
+ // If this is a matching fail count attribute, update fail count
+ if (regexec(&(fc_data->failcount_re), (const char *) key, 0, NULL, 0) == 0) {
+ fc_data->failcount = pcmk__add_scores(fc_data->failcount,
+ char2score(value));
+ pe_rsc_trace(fc_data->rsc, "Added %s (%s) to %s fail count (now %s)",
+ (const char *) key, (const char *) value, fc_data->rsc->id,
+ pcmk_readable_score(fc_data->failcount));
+ return;
+ }
- /* Resource fail count is sum of all matching operation fail counts */
- g_hash_table_iter_init(&iter, node->details->attrs);
- while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
- if (regexec(&failcount_re, key, 0, NULL, 0) == 0) {
- failcount = pcmk__add_scores(failcount, char2score(value));
- crm_trace("Added %s (%s) to %s fail count (now %s)",
- key, value, rsc->id, pcmk_readable_score(failcount));
- } else if (regexec(&lastfailure_re, key, 0, NULL, 0) == 0) {
- long long last_ll;
-
- if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
- last = (time_t) QB_MAX(last, last_ll);
- }
+ // If this is a matching last failure attribute, update last failure
+ if (regexec(&(fc_data->lastfailure_re), (const char *) key, 0, NULL,
+ 0) == 0) {
+ long long last_ll;
+
+ if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
+ fc_data->last_failure = (time_t) QB_MAX(fc_data->last_failure,
+ last_ll);
}
}
+}
- regfree(&failcount_re);
- regfree(&lastfailure_re);
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a filler resource
+ *
+ * \param[in] data Filler resource
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_filler(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *filler = data;
+ struct failcount_data *fc_data = user_data;
+ time_t filler_last_failure = 0;
+
+ fc_data->failcount += pe_get_failcount(fc_data->node, filler,
+ &filler_last_failure, fc_data->flags,
+ fc_data->xml_op);
+ fc_data->last_failure = QB_MAX(fc_data->last_failure, filler_last_failure);
+}
- if ((failcount > 0) && (last > 0) && (last_failure != NULL)) {
- *last_failure = last;
- }
+/*!
+ * \internal
+ * \brief Get a resource's fail count on a node
+ *
+ * \param[in] node Node to check
+ * \param[in,out] rsc Resource to check
+ * \param[out] last_failure If not NULL, where to set time of most recent
+ * failure of \p rsc on \p node
+ * \param[in] flags Group of enum pcmk__fc_flags
+ * \param[in] xml_op If not NULL, consider only the action in this
+ * history entry when determining whether on-fail
+ * is configured as "block", otherwise consider
+ * all actions configured for \p rsc
+ *
+ * \return Fail count for \p rsc on \p node according to \p flags
+ */
+int
+pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+{
+ struct failcount_data fc_data = {
+ .node = node,
+ .rsc = rsc,
+ .flags = flags,
+ .xml_op = xml_op,
+ .failcount = 0,
+ .last_failure = (time_t) 0,
+ };
+
+ // Calculate resource failcount as sum of all matching operation failcounts
+ CRM_CHECK(generate_fail_regexes(rsc, &fc_data.failcount_re,
+ &fc_data.lastfailure_re) == pcmk_rc_ok,
+ return 0);
+ g_hash_table_foreach(node->details->attrs, update_failcount_for_attr,
+ &fc_data);
+ regfree(&(fc_data.failcount_re));
+ regfree(&(fc_data.lastfailure_re));
- /* If failure blocks the resource, disregard any failure timeout */
- if ((failcount > 0) && rsc->failure_timeout
+ // If failure blocks the resource, disregard any failure timeout
+ if ((fc_data.failcount > 0) && (rsc->failure_timeout > 0)
&& block_failure(node, rsc, xml_op)) {
- pe_warn("Ignoring failure timeout %d for %s because it conflicts with on-fail=block",
+ pe_warn("Ignoring failure timeout %d for %s "
+ "because it conflicts with on-fail=block",
rsc->failure_timeout, rsc->id);
rsc->failure_timeout = 0;
}
- /* If all failures have expired, ignore fail count */
- if (pcmk_is_set(flags, pe_fc_effective) && (failcount > 0) && (last > 0)
- && rsc->failure_timeout) {
+ // If all failures have expired, ignore fail count
+ if (pcmk_is_set(flags, pcmk__fc_effective) && (fc_data.failcount > 0)
+ && (fc_data.last_failure > 0) && (rsc->failure_timeout != 0)) {
time_t now = get_effective_time(rsc->cluster);
- if (now > (last + rsc->failure_timeout)) {
- crm_debug("Failcount for %s on %s expired after %ds",
- rsc->id, pe__node_name(node), rsc->failure_timeout);
- failcount = 0;
+ if (now > (fc_data.last_failure + rsc->failure_timeout)) {
+ pe_rsc_debug(rsc, "Failcount for %s on %s expired after %ds",
+ rsc->id, pe__node_name(node), rsc->failure_timeout);
+ fc_data.failcount = 0;
}
}
- /* We never want the fail counts of a bundle container's fillers to
- * count towards the container's fail count.
+ /* Add the fail count of any filler resources, except that we never want the
+ * fail counts of a bundle container's fillers to count towards the
+ * container's fail count.
*
* Most importantly, a Pacemaker Remote connection to a bundle container
* is a filler of the container, but can reside on a different node than the
@@ -340,62 +411,56 @@ pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
* container's fail count on that node could lead to attempting to stop the
* container on the wrong node.
*/
-
- if (pcmk_is_set(flags, pe_fc_fillers) && rsc->fillers
+ if (pcmk_is_set(flags, pcmk__fc_fillers) && (rsc->fillers != NULL)
&& !pe_rsc_is_bundled(rsc)) {
- GList *gIter = NULL;
-
- for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = (pe_resource_t *) gIter->data;
- time_t filler_last_failure = 0;
-
- failcount += pe_get_failcount(node, filler, &filler_last_failure,
- flags, xml_op);
-
- if (last_failure && filler_last_failure > *last_failure) {
- *last_failure = filler_last_failure;
- }
- }
-
- if (failcount > 0) {
- crm_info("Container %s and the resources within it "
- "have failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ g_list_foreach(rsc->fillers, update_failcount_for_filler, &fc_data);
+ if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc,
+ "Container %s and the resources within it "
+ "have failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- } else if (failcount > 0) {
- crm_info("%s has failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ } else if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc, "%s has failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- return failcount;
+ if (last_failure != NULL) {
+ if ((fc_data.failcount > 0) && (fc_data.last_failure > 0)) {
+ *last_failure = fc_data.last_failure;
+ } else {
+ *last_failure = 0;
+ }
+ }
+ return fc_data.failcount;
}
/*!
* \brief Schedule a controller operation to clear a fail count
*
- * \param[in,out] rsc Resource with failure
- * \param[in] node Node failure occurred on
- * \param[in] reason Readable description why needed (for logging)
- * \param[in,out] data_set Working set for cluster
+ * \param[in,out] rsc Resource with failure
+ * \param[in] node Node failure occurred on
+ * \param[in] reason Readable description why needed (for logging)
+ * \param[in,out] scheduler Scheduler data
*
* \return Scheduled action
*/
-pe_action_t *
-pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
- const char *reason, pe_working_set_t *data_set)
+pcmk_action_t *
+pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *reason, pcmk_scheduler_t *scheduler)
{
char *key = NULL;
- pe_action_t *clear = NULL;
+ pcmk_action_t *clear = NULL;
- CRM_CHECK(rsc && node && reason && data_set, return NULL);
+ CRM_CHECK(rsc && node && reason && scheduler, return NULL);
- key = pcmk__op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
- clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE,
- data_set);
+ key = pcmk__op_key(rsc->id, PCMK_ACTION_CLEAR_FAILCOUNT, 0);
+ clear = custom_action(rsc, key, PCMK_ACTION_CLEAR_FAILCOUNT, node, FALSE,
+ scheduler);
add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
rsc->id, pe__node_name(node), reason, clear->uuid);
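As a rough sketch of how the reworked fail-count API above fits together (not part of this patch): a caller holding scheduler data could query a resource's effective fail count on a node and schedule the clearing operation when needed. The wrapper function, its reason string, and the include line are assumptions for illustration; the function signatures and pcmk__fc_* flag names come from the hunks above.

#include <crm/pengine/internal.h>   // assumed to declare the functions used below

/* Hypothetical caller: check a resource's effective fail count on a node and,
 * if it has failed there, schedule the controller operation that clears it.
 */
static void
maybe_clear_failures(pcmk_scheduler_t *scheduler, pcmk_resource_t *rsc,
                     const pcmk_node_t *node)
{
    time_t last_failure = 0;
    int failcount = pe_get_failcount(node, rsc, &last_failure,
                                     pcmk__fc_effective|pcmk__fc_fillers,
                                     NULL /* consider all configured actions */);

    if (failcount > 0) {
        // pe__clear_failcount() returns the scheduled action; ignored here
        pe__clear_failcount(rsc, node, "administrator request", scheduler);
    }
}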
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index d54b01a..dad610c 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -21,8 +21,8 @@
#include <pe_status_private.h>
typedef struct group_variant_data_s {
- pe_resource_t *last_child; // Last group member
- uint32_t flags; // Group of enum pe__group_flags
+ pcmk_resource_t *last_child; // Last group member
+ uint32_t flags; // Group of enum pcmk__group_flags
} group_variant_data_t;
/*!
@@ -33,11 +33,11 @@ typedef struct group_variant_data_s {
*
* \return Last member of \p group if any, otherwise NULL
*/
-pe_resource_t *
-pe__last_group_member(const pe_resource_t *group)
+pcmk_resource_t *
+pe__last_group_member(const pcmk_resource_t *group)
{
if (group != NULL) {
- CRM_CHECK((group->variant == pe_group)
+ CRM_CHECK((group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return NULL);
return ((group_variant_data_t *) group->variant_opaque)->last_child;
}
@@ -54,11 +54,11 @@ pe__last_group_member(const pe_resource_t *group)
* \return true if all \p flags are set for \p group, otherwise false
*/
bool
-pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
+pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags)
{
group_variant_data_t *group_data = NULL;
- CRM_CHECK((group != NULL) && (group->variant == pe_group)
+ CRM_CHECK((group != NULL) && (group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return false);
group_data = (group_variant_data_t *) group->variant_opaque;
return pcmk_all_flags_set(group_data->flags, flags);
@@ -74,7 +74,7 @@ pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
* \param[in] wo_bit "Warn once" flag to use for deprecation warning
*/
static void
-set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
+set_group_flag(pcmk_resource_t *group, const char *option, uint32_t flag,
uint32_t wo_bit)
{
const char *value_s = NULL;
@@ -97,12 +97,12 @@ set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
}
static int
-inactive_resources(pe_resource_t *rsc)
+inactive_resources(pcmk_resource_t *rsc)
{
int retval = 0;
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (!child_rsc->fns->active(child_rsc, TRUE)) {
retval++;
@@ -113,7 +113,7 @@ inactive_resources(pe_resource_t *rsc)
}
static void
-group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+group_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
int n_inactive, bool show_inactive, const char *desc)
{
GString *attrs = NULL;
@@ -128,10 +128,10 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -150,8 +150,8 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
static bool
-skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
- GList *only_rsc, uint32_t show_opts)
+skip_child_rsc(pcmk_resource_t *rsc, pcmk_resource_t *child,
+ gboolean parent_passes, GList *only_rsc, uint32_t show_opts)
{
bool star_list = pcmk__list_of_1(only_rsc) &&
pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
@@ -177,7 +177,7 @@ skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
}
gboolean
-group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
@@ -191,9 +191,10 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
rsc->variant_opaque = group_data;
// @COMPAT These are deprecated since 2.1.5
- set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
- pe_wo_group_order);
- set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
+ set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pcmk__group_ordered,
+ pcmk__wo_group_order);
+ set_group_flag(rsc, "collocated", pcmk__group_colocated,
+ pcmk__wo_group_coloc);
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
@@ -202,11 +203,11 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
if (pcmk__str_eq((const char *)xml_native_rsc->name,
XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
continue;
}
@@ -232,14 +233,14 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-group_active(pe_resource_t * rsc, gboolean all)
+group_active(pcmk_resource_t *rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
@@ -261,7 +262,7 @@ group_active(pe_resource_t * rsc, gboolean all)
* \deprecated This function will be removed in a future release
*/
static void
-group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+group_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GList *gIter = rsc->children;
@@ -272,7 +273,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -286,7 +287,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
void
-group_print(pe_resource_t *rsc, const char *pre_text, long options,
+group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = NULL;
@@ -317,7 +318,7 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
} else {
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("<li>\n");
@@ -335,12 +336,13 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -359,7 +361,7 @@ pe__group_xml(pcmk__output_t *out, va_list args)
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -367,8 +369,8 @@ pe__group_xml(pcmk__output_t *out, va_list args)
if (rc == pcmk_rc_no_output) {
char *count = pcmk__itoa(g_list_length(gIter));
- const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
- const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
+ const char *maint_s = pe__rsc_bool_str(rsc, pcmk_rsc_maintenance);
+ const char *managed_s = pe__rsc_bool_str(rsc, pcmk_rsc_managed);
const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
@@ -393,12 +395,13 @@ pe__group_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -431,7 +434,7 @@ pe__group_default(pcmk__output_t *out, va_list args)
} else {
for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -450,14 +453,14 @@ pe__group_default(pcmk__output_t *out, va_list args)
}
void
-group_free(pe_resource_t * rsc)
+group_free(pcmk_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -471,13 +474,13 @@ group_free(pe_resource_t * rsc)
}
enum rsc_role_e
-group_resource_state(const pe_resource_t * rsc, gboolean current)
+group_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e group_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
@@ -490,7 +493,7 @@ group_resource_state(const pe_resource_t * rsc, gboolean current)
}
gboolean
-pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -508,7 +511,7 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child_rsc = iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
@@ -519,3 +522,18 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
return !passes;
}
+
+/*!
+ * \internal
+ * \brief Get maximum group resource instances per node
+ *
+ * \param[in] rsc Group resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__group_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
+ return 1U;
+}
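A short illustrative helper (again, not part of the patch) for the group accessors touched above. The describe_group() name and the crm_info() message are made up for the example; the flag and function names are taken from the hunks.

/* Illustrative only: inspect a group with the helpers changed above.
 * Assumes a valid resource of variant pcmk_rsc_variant_group.
 */
static void
describe_group(const pcmk_resource_t *group)
{
    const pcmk_resource_t *last = pe__last_group_member(group);
    bool ordered = pe__group_flag_is_set(group, pcmk__group_ordered);
    bool colocated = pe__group_flag_is_set(group, pcmk__group_colocated);

    crm_info("Group %s: last member %s, ordered=%s, colocated=%s, "
             "max %u instance(s) per node",
             group->id, ((last != NULL)? last->id : "(none)"),
             pcmk__btoa(ordered), pcmk__btoa(colocated),
             pe__group_max_per_node(group));
}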
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 5e92ddc..48b1a6a 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -30,18 +30,19 @@
* \brief Check whether a resource is active on multiple nodes
*/
static bool
-is_multiply_active(const pe_resource_t *rsc)
+is_multiply_active(const pcmk_resource_t *rsc)
{
unsigned int count = 0;
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
-native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
+native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node,
+ gboolean failed)
{
int priority = 0;
@@ -49,7 +50,7 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
return;
}
- if (rsc->role == RSC_ROLE_PROMOTED) {
+ if (rsc->role == pcmk_role_promoted) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
@@ -60,9 +61,9 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
pe__node_name(node), node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
+ (rsc->role == pcmk_role_promoted)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
@@ -71,28 +72,29 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = gIter->data;
+ pcmk_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
"from guest node %s",
pe__node_name(a_node), a_node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
+ (rsc->role == pcmk_role_promoted)? " + 1" : "",
pe__node_name(node));
}
}
}
void
-native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
+native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler, gboolean failed)
{
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
@@ -101,25 +103,27 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
}
- if (rsc->variant == pe_native && node->details->maintenance) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_maintenance);
+ if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && node->details->maintenance) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_maintenance);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe_resource_t *p = rsc->parent;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ pcmk_resource_t *p = rsc->parent;
pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
- resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
+ resource_location(rsc, node, INFINITY, "not_managed_default",
+ scheduler);
while(p && node->details->online) {
/* add without the additional location constraint */
@@ -131,43 +135,46 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
- case recovery_stop_only:
+ case pcmk_multiply_active_stop:
{
GHashTableIter gIter;
- pe_node_t *local_node = NULL;
+ pcmk_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
- rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
+ rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -INFINITY;
}
}
break;
- case recovery_block:
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ case pcmk_multiply_active_block:
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
/* If the resource belongs to a group or bundle configured with
* multiple-active=block, block the entire entity.
*/
if (rsc->parent
- && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
- && rsc->parent->recovery_type == recovery_block) {
+ && ((rsc->parent->variant == pcmk_rsc_variant_group)
+ || (rsc->parent->variant == pcmk_rsc_variant_bundle))
+ && (rsc->parent->recovery_type == pcmk_multiply_active_block)) {
GList *gIter = rsc->parent->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = gIter->data;
- pe__clear_resource_flags(child, pe_rsc_managed);
- pe__set_resource_flags(child, pe_rsc_block);
+ pe__clear_resource_flags(child, pcmk_rsc_managed);
+ pe__set_resource_flags(child, pcmk_rsc_blocked);
}
}
break;
- default: // recovery_stop_start, recovery_stop_unexpected
+
+ // pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
+ default:
/* The scheduler will do the right thing because the relevant
* variables and flags are set when unpacking the history.
*/
@@ -183,22 +190,22 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
if (rsc->parent != NULL) {
- native_add_running(rsc->parent, node, data_set, FALSE);
+ native_add_running(rsc->parent, node, scheduler, FALSE);
}
}
static void
-recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
+recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data)
{
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
}
gboolean
-native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *parent = uber_parent(rsc);
+ pcmk_resource_t *parent = uber_parent(rsc);
const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
@@ -206,14 +213,15 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
- && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && pe_rsc_is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
- pe__force_anon(standard, parent, rsc->id, data_set);
+ pe__force_anon(standard, parent, rsc->id, scheduler);
/* Clear globally-unique on the parent and all its descendants unpacked
* so far (clearing the parent should make any future children unpacking
@@ -224,7 +232,7 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
recursive_clear_unique(rsc, NULL);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
- && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
+ && pcmk_is_set(parent->flags, pcmk_rsc_promotable)) {
pe_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
@@ -235,42 +243,44 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
static bool
-rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
+rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags)
{
pe_rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, pe__node_name(node));
- if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->running_on != NULL)) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
- pe_node_t *loc = (pe_node_t *) iter->data;
+ pcmk_node_t *loc = (pcmk_node_t *) iter->data;
if (loc->details == node->details) {
return true;
}
}
- } else if (pcmk_is_set(flags, pe_find_inactive)
+ } else if (pcmk_is_set(flags, pe_find_inactive) // @COMPAT deprecated
&& (rsc->running_on == NULL)) {
return true;
- } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
+ } else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->allocated_to != NULL)
&& (rsc->allocated_to->details == node->details)) {
return true;
}
return false;
}
-pe_resource_t *
-native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
- int flags)
+pcmk_resource_t *
+native_find_rsc(pcmk_resource_t *rsc, const char *id,
+ const pcmk_node_t *on_node, int flags)
{
bool match = false;
- pe_resource_t *result = NULL;
+ pcmk_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
- if (flags & pe_find_clone) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) {
const char *rid = ID(rsc->xml);
if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
@@ -283,13 +293,13 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
} else if (!strcmp(id, rsc->id)) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_renamed)
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_history)
&& rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_any)
- || (pcmk_is_set(flags, pe_find_anon)
- && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_basename)
+ || (pcmk_is_set(flags, pcmk_rsc_match_anon_basename)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
@@ -304,7 +314,7 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
result = rsc->fns->find_rsc(child, id, on_node, flags);
if (result) {
@@ -316,8 +326,8 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
// create is ignored
char *
-native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
- pe_working_set_t * data_set)
+native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
+ const char *name, pcmk_scheduler_t *scheduler)
{
char *value_copy = NULL;
const char *value = NULL;
@@ -327,7 +337,7 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
@@ -338,16 +348,17 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
}
gboolean
-native_active(pe_resource_t * rsc, gboolean all)
+native_active(pcmk_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
if (a_node->details->unclean) {
pe_rsc_trace(rsc, "Resource %s: %s is unclean",
rsc->id, pe__node_name(a_node));
return TRUE;
- } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!a_node->details->online
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "Resource %s: %s is offline",
rsc->id, pe__node_name(a_node));
} else {
@@ -365,27 +376,32 @@ struct print_data_s {
};
static const char *
-native_pending_state(const pe_resource_t *rsc)
+native_pending_state(const pcmk_resource_t *rsc)
{
const char *pending_state = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
pending_state = "Stopping";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_casei)) {
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_casei)) {
/* Work might be done in here. */
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
+ pcmk__str_casei)) {
pending_state = "Promoting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
+ pcmk__str_casei)) {
pending_state = "Demoting";
}
@@ -393,11 +409,11 @@ native_pending_state(const pe_resource_t *rsc)
}
static const char *
-native_pending_task(const pe_resource_t *rsc)
+native_pending_task(const pcmk_resource_t *rsc)
{
const char *pending_task = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
@@ -415,21 +431,21 @@ native_pending_task(const pe_resource_t *rsc)
}
static enum rsc_role_e
-native_displayable_role(const pe_resource_t *rsc)
+native_displayable_role(const pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
- if ((role == RSC_ROLE_STARTED)
+ if ((role == pcmk_role_started)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
+ pcmk_rsc_promotable)) {
- role = RSC_ROLE_UNPROMOTED;
+ role = pcmk_role_unpromoted;
}
return role;
}
static const char *
-native_displayable_state(const pe_resource_t *rsc, bool print_pending)
+native_displayable_state(const pcmk_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
@@ -447,7 +463,7 @@ native_displayable_state(const pe_resource_t *rsc, bool print_pending)
* \deprecated This function will be removed in a future release
*/
static void
-native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+native_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -471,12 +487,14 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("target_role=\"%s\" ", target_role);
}
status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
- status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
- status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_removed));
+ status_print("blocked=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_blocked));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
if (options & pe_print_pending) {
@@ -496,7 +514,7 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
status_print("%s <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
"cached=\"%s\"/>\n",
@@ -542,8 +560,8 @@ add_output_node(GString *s, const char *node, bool have_nodes)
* \note Caller must free the result with g_free().
*/
gchar *
-pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
- const pe_node_t *node, uint32_t show_opts,
+pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
+ const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -552,7 +570,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
GString *outstr = NULL;
bool have_flags = false;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
return NULL;
}
@@ -580,14 +598,14 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
pcmk__s(provider, ""), ":", kind, "):\t", NULL);
// State on node
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
g_string_append(outstr, " ORPHANED");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
g_string_append(outstr, " FAILED");
- if (role > RSC_ROLE_UNPROMOTED) {
+ if (role > pcmk_role_unpromoted) {
pcmk__add_word(&outstr, 0, role2text(role));
}
} else {
@@ -600,7 +618,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
// Failed probe operation
- if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) {
+ if (native_displayable_role(rsc) == pcmk_role_stopped) {
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
if (probe_op != NULL) {
int rc;
@@ -632,30 +650,31 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
* Started, as it is the default anyways, and doesn't prevent the
* resource from becoming promoted).
*/
- if (target_role_e == RSC_ROLE_STOPPED) {
+ if (target_role_e == pcmk_role_stopped) {
have_flags = add_output_flag(outstr, "disabled", have_flags);
} else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)
- && target_role_e == RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)
+ && (target_role_e == pcmk_role_unpromoted)) {
have_flags = add_output_flag(outstr, "target-role:", have_flags);
g_string_append(outstr, target_role);
}
}
// Blocked or maintenance implies unmanaged
- if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_any_flags_set(rsc->flags,
+ pcmk_rsc_blocked|pcmk_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
have_flags = add_output_flag(outstr, "maintenance", have_flags);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
@@ -682,7 +701,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *n = (pe_node_t *) iter->data;
+ pcmk_node_t *n = (pcmk_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
@@ -695,8 +714,8 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
int
-pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -705,7 +724,7 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
xmlNodePtr list_node = NULL;
const char *cl = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
CRM_ASSERT(kind != NULL);
if (rsc->meta) {
@@ -720,19 +739,20 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
cl = "rsc-managed";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
cl = "rsc-failed";
- } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && (rsc->running_on == NULL)) {
cl = "rsc-failed";
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = "rsc-multiple";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
cl = "rsc-failure-ignored";
} else {
@@ -752,13 +772,13 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
}
int
-pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
@@ -788,12 +808,12 @@ pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
* \deprecated This function will be removed in a future release
*/
void
-common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
- const pe_node_t *node, long options, void *print_data)
+common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
+ const pcmk_node_t *node, long options, void *print_data)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
@@ -818,10 +838,10 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
if (options & pe_print_html) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
status_print("<font color=\"yellow\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
status_print("<font color=\"red\">");
} else if (rsc->running_on == NULL) {
@@ -830,7 +850,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
} else if (pcmk__list_of_multiple(rsc->running_on)) {
status_print("<font color=\"orange\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
status_print("<font color=\"yellow\">");
} else {
@@ -863,7 +883,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = (pe_node_t *) gIter->data;
+ pcmk_node_t *n = (pcmk_node_t *) gIter->data;
counter++;
@@ -908,12 +928,12 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
* \deprecated This function will be removed in a future release
*/
void
-native_print(pe_resource_t *rsc, const char *pre_text, long options,
+native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
@@ -929,12 +949,13 @@ native_print(pe_resource_t *rsc, const char *pre_text, long options,
common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -956,7 +977,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -979,12 +1000,12 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
"role", rsc_state,
"target_role", target_role,
"active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
- "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
- "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "orphaned", pe__rsc_bool_str(rsc, pcmk_rsc_removed),
+ "blocked", pe__rsc_bool_str(rsc, pcmk_rsc_blocked),
+ "maintenance", pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored", pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"nodes_running_on", nodes_running_on,
"pending", (print_pending? native_pending_task(rsc) : NULL),
"locked_to", lock_node_name,
@@ -997,7 +1018,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
"name", node->details->uname,
@@ -1011,22 +1032,23 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
@@ -1035,18 +1057,19 @@ pe__resource_html(pcmk__output_t *out, va_list args)
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -1060,14 +1083,14 @@ pe__resource_text(pcmk__output_t *out, va_list args)
}
void
-native_free(pe_resource_t * rsc)
+native_free(pcmk_resource_t * rsc)
{
pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
-native_resource_state(const pe_resource_t * rsc, gboolean current)
+native_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
@@ -1089,17 +1112,18 @@ native_resource_state(const pe_resource_t * rsc, gboolean current)
*
* \return If list contains only one node, that node, or NULL otherwise
*/
-pe_node_t *
-native_location(const pe_resource_t *rsc, GList **list, int current)
+pcmk_node_t *
+native_location(const pcmk_resource_t *rsc, GList **list, int current)
{
- pe_node_t *one = NULL;
+ // @COMPAT: Accept a pcmk__rsc_node argument instead of int current
+ pcmk_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
child->fns->location(child, &result, current);
}
@@ -1126,7 +1150,7 @@ native_location(const pe_resource_t *rsc, GList **list, int current)
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
@@ -1144,7 +1168,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -1155,7 +1179,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
int *rsc_counter = NULL;
int *active_counter = NULL;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
continue;
}
@@ -1185,11 +1209,11 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE &&
- pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
continue;
}
@@ -1398,17 +1422,32 @@ pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
}
gboolean
-pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
return FALSE;
} else if (check_parent && rsc->parent) {
- const pe_resource_t *up = pe__const_top_resource(rsc, true);
+ const pcmk_resource_t *up = pe__const_top_resource(rsc, true);
return up->fns->is_filtered(up, only_rsc, FALSE);
}
return TRUE;
}
+
+/*!
+ * \internal
+ * \brief Get maximum primitive resource instances per node
+ *
+ * \param[in] rsc Primitive resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__primitive_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
+ return 1U;
+}
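For the primitive changes, a comparable sketch (not from the patch) of looking up a resource by its history name on its current node and rendering the one-line status string. log_primitive_status() is a hypothetical wrapper; passing 0 for show_opts is simply the plainest choice for the example, and the flag names and signatures are those shown above.

/* Sketch only: find the primitive matching this resource's history ID on the
 * given node, then log pcmk__native_output_string()'s one-line summary.
 * The returned string must be released with g_free(), per the note above.
 */
static void
log_primitive_status(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    pcmk_resource_t *match = native_find_rsc(rsc, rsc->id, node,
                                             pcmk_rsc_match_history
                                             |pcmk_rsc_match_current_node);

    if (match != NULL) {
        gchar *s = pcmk__native_output_string(match, rsc_printable_id(match),
                                              node, 0 /* show_opts */,
                                              NULL /* target_role */, false);

        crm_info("%s", s);
        g_free(s);
    }
}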
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index ed7f0da..aaa6598 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,29 +14,30 @@
#include <crm/crm.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
+#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
-static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms);
+static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms);
static void
-add_singleton(pe_working_set_t *data_set, pe_action_t *action)
+add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
{
- if (data_set->singletons == NULL) {
- data_set->singletons = pcmk__strkey_table(NULL, NULL);
+ if (scheduler->singletons == NULL) {
+ scheduler->singletons = pcmk__strkey_table(NULL, NULL);
}
- g_hash_table_insert(data_set->singletons, action->uuid, action);
+ g_hash_table_insert(scheduler->singletons, action->uuid, action);
}
-static pe_action_t *
-lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
+static pcmk_action_t *
+lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
{
- if (data_set->singletons == NULL) {
+ if (scheduler->singletons == NULL) {
return NULL;
}
- return g_hash_table_lookup(data_set->singletons, action_uuid);
+ return g_hash_table_lookup(scheduler->singletons, action_uuid);
}
/*!
@@ -46,21 +47,21 @@ lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Existing action that matches arguments (or NULL if none)
*/
-static pe_action_t *
-find_existing_action(const char *key, const pe_resource_t *rsc,
- const pe_node_t *node, const pe_working_set_t *data_set)
+static pcmk_action_t *
+find_existing_action(const char *key, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
{
GList *matches = NULL;
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- /* When rsc is NULL, it would be quicker to check data_set->singletons,
- * but checking all data_set->actions takes the node into account.
+ /* When rsc is NULL, it would be quicker to check scheduler->singletons,
+ * but checking all scheduler->actions takes the node into account.
*/
- matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
+ matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
@@ -72,79 +73,78 @@ find_existing_action(const char *key, const pe_resource_t *rsc,
return action;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration corresponding to a specific action key
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
static xmlNode *
-find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
- gboolean include_disabled)
+find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- guint interval_ms = 0;
- gboolean do_retry = TRUE;
- char *local_key = NULL;
- const char *name = NULL;
- const char *interval_spec = NULL;
- char *match_key = NULL;
- xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- retry:
- for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
- operation = pcmk__xe_next(operation)) {
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
+ bool enabled = false;
+ const char *config_name = NULL;
+ const char *interval_spec = NULL;
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
-
- interval_ms = crm_parse_interval_spec(interval_spec);
- match_key = pcmk__op_key(rsc->id, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
-
- if (rsc->clone_name) {
- match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
- }
-
- if (op != NULL) {
- free(local_key);
- return op;
- }
+ // @TODO This does not consider rules, defaults, etc.
+ if (!include_disabled
+ && (pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
}
- }
-
- free(local_key);
- if (do_retry == FALSE) {
- return NULL;
- }
- do_retry = FALSE;
- if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
- local_key = pcmk__op_key(rsc->id, "migrate", 0);
- key = local_key;
- goto retry;
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) != interval_ms) {
+ continue;
+ }
- } else if (strstr(key, "_notify_")) {
- local_key = pcmk__op_key(rsc->id, "notify", 0);
- key = local_key;
- goto retry;
+ config_name = crm_element_value(operation, "name");
+ if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
+ return operation;
+ }
}
-
return NULL;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration of a resource action
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
xmlNode *
-find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
+pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- return find_rsc_op_entry_helper(rsc, key, FALSE);
+ xmlNode *action_config = NULL;
+
+ // Try requested action first
+ action_config = find_exact_action_config(rsc, action_name, interval_ms,
+ include_disabled);
+
+ // For migrate_to and migrate_from actions, retry with "migrate"
+ // @TODO This should be either documented or deprecated
+ if ((action_config == NULL)
+ && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ action_config = find_exact_action_config(rsc, "migrate", 0,
+ include_disabled);
+ }
+
+ return action_config;
}
/*!
@@ -156,98 +156,106 @@ find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
-static pe_action_t *
-new_action(char *key, const char *task, pe_resource_t *rsc,
- const pe_node_t *node, bool optional, bool for_graph,
- pe_working_set_t *data_set)
+static pcmk_action_t *
+new_action(char *key, const char *task, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = calloc(1, sizeof(pe_action_t));
+ pcmk_action_t *action = calloc(1, sizeof(pcmk_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
- action->extra = pcmk__strkey_table(free, free);
- action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
- if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
- pe__set_action_flags(action, pe_action_dc);
+ pe__set_action_flags(action, pcmk_action_on_dc);
}
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
if (optional) {
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
} else {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
- if (rsc != NULL) {
+ if (rsc == NULL) {
+ action->meta = pcmk__strkey_table(free, free);
+ } else {
guint interval_ms = 0;
- action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set,
- interval_ms);
+ action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
+ true);
+
+ /* If the given key is for one of the many notification pseudo-actions
+ * (pre_notify_promote, etc.), the actual action name is "notify"
+ */
+ if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
+ action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
+ 0, true);
+ }
+
+ unpack_operation(action, action->op_entry, interval_ms);
}
- if (for_graph) {
- pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
- (optional? "optional" : "required"),
- data_set->action_id, key, task,
- ((rsc == NULL)? "no resource" : rsc->id),
- pe__node_name(node));
- action->id = data_set->action_id++;
+ pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
+ (optional? "optional" : "required"),
+ scheduler->action_id, key, task,
+ ((rsc == NULL)? "no resource" : rsc->id),
+ pe__node_name(node));
+ action->id = scheduler->action_id++;
- data_set->actions = g_list_prepend(data_set->actions, action);
- if (rsc == NULL) {
- add_singleton(data_set, action);
- } else {
- rsc->actions = g_list_prepend(rsc->actions, action);
- }
+ scheduler->actions = g_list_prepend(scheduler->actions, action);
+ if (rsc == NULL) {
+ add_singleton(scheduler, action);
+ } else {
+ rsc->actions = g_list_prepend(rsc->actions, action);
}
return action;
}
/*!
* \internal
- * \brief Evaluate node attribute values for an action
+ * \brief Unpack a resource's action-specific instance parameters
*
- * \param[in,out] action Action to unpack attributes for
- * \param[in,out] data_set Cluster working set
+ * \param[in] action_xml XML of action's configuration in CIB (if any)
+ * \param[in,out] node_attrs Table of node attributes (for rule evaluation)
+ * \param[in,out] scheduler Cluster working set (for rule evaluation)
+ *
+ * \return Newly allocated hash table of action-specific instance parameters
*/
-static void
-unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
+GHashTable *
+pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
+ GHashTable *node_attrs,
+ pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
- && (action->op_entry != NULL)) {
-
- pe_rule_eval_data_t rule_data = {
- .node_hash = action->node->details->attrs,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
- .match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
- };
-
- pe__set_action_flags(action, pe_action_have_node_attrs);
- pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
- &rule_data, action->extra, NULL,
- FALSE, data_set);
- }
+ GHashTable *params = pcmk__strkey_table(free, free);
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_attrs,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe__unpack_dataset_nvpairs(action_xml, XML_TAG_ATTR_SETS,
+ &rule_data, params, NULL,
+ FALSE, scheduler);
+ return params;
}
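As a rough usage sketch, mirroring the call added to custom_action() later in this patch, and assuming an action with a populated op_entry and assigned node plus a scheduler pointer are in scope:

    // Evaluate the action's instance attributes against the node's
    // attribute table; the caller owns (and must free) the result
    GHashTable *params =
        pcmk__unpack_action_rsc_params(action->op_entry,
                                       action->node->details->attrs,
                                       scheduler);

    /* ... read values with g_hash_table_lookup() as needed ... */
    g_hash_table_destroy(params);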
/*!
@@ -258,46 +266,46 @@ unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
* \param[in] optional Requested optional status
*/
static void
-update_action_optional(pe_action_t *action, gboolean optional)
+update_action_optional(pcmk_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
}
static enum pe_quorum_policy
-effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
+effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- enum pe_quorum_policy policy = data_set->no_quorum_policy;
+ enum pe_quorum_policy policy = scheduler->no_quorum_policy;
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- policy = no_quorum_ignore;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ policy = pcmk_no_quorum_ignore;
- } else if (data_set->no_quorum_policy == no_quorum_demote) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
- case RSC_ROLE_PROMOTED:
- case RSC_ROLE_UNPROMOTED:
- if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
+ case pcmk_role_promoted:
+ case pcmk_role_unpromoted:
+ if (rsc->next_role > pcmk_role_unpromoted) {
+ pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
- policy = no_quorum_ignore;
+ policy = pcmk_no_quorum_ignore;
break;
default:
- policy = no_quorum_stop;
+ policy = pcmk_no_quorum_stop;
break;
}
}
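A brief behavioural sketch, not taken from the source, with rsc and scheduler assumed in scope: when the cluster is quorate, the configured no-quorum-policy is effectively ignored, so the helper always reports pcmk_no_quorum_ignore.

    if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
        // With quorum, every resource's effective policy is "ignore"
        CRM_ASSERT(effective_quorum_policy(rsc, scheduler)
                   == pcmk_no_quorum_ignore);
    }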
@@ -309,50 +317,47 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
-update_resource_action_runnable(pe_action_t *action, bool for_graph,
- pe_working_set_t *data_set)
+update_resource_action_runnable(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
- "%s on %s is unrunnable (node is offline)",
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && for_graph
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
- pe_fence_node(data_set, action->node, "stop is unrunnable", false);
+ pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
}
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& action->node->details->pending) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING,
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
- } else if (action->needs == rsc_req_nothing) {
+ } else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
- && !pe_can_fence(data_set, action->node)) {
+ && !pe_can_fence(scheduler, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
@@ -361,37 +366,37 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
} else {
- switch (effective_quorum_policy(action->rsc, data_set)) {
- case no_quorum_stop:
+ switch (effective_quorum_policy(action->rsc, scheduler)) {
+ case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
- case no_quorum_freeze:
+ case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
break;
}
}
@@ -405,19 +410,20 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
* \param[in] action New action
*/
static void
-update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
+update_resource_flags_for_action(pcmk_resource_t *rsc,
+ const pcmk_action_t *action)
{
- /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
- * within Pacemaker, and should be deprecated and eventually removed
+ /* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
+ * within Pacemaker, and will eventually be removed
*/
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_stopping);
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_stopping);
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
- pe__set_resource_flags(rsc, pe_rsc_starting);
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
- pe__clear_resource_flags(rsc, pe_rsc_starting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
@@ -428,80 +434,121 @@ valid_stop_on_fail(const char *value)
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
-static const char *
-unpack_operation_on_fail(pe_action_t * action)
+/*!
+ * \internal
+ * \brief Validate (and possibly reset) resource action's on_fail meta-attribute
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] action_config Action configuration XML from CIB (if any)
+ * \param[in,out] meta Table of action meta-attributes
+ */
+static void
+validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
- const char *on_fail = NULL;
const char *interval_spec = NULL;
- const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ char *key = NULL;
+ char *new_value = NULL;
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ // Stop actions can only use certain on-fail values
+ if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
- "allowed for stop", action->rsc->id, value);
- return NULL;
-
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
- // demote on_fail defaults to monitor value for promoted role if present
- xmlNode *operation = NULL;
+ "allowed for stop", rsc->id, value);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
+ }
- CRM_CHECK(action->rsc != NULL, return NULL);
+ /* Demote actions default on-fail to the on-fail value for the first
+ * recurring monitor for the promoted role (if any).
+ */
+ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && (value == NULL)) {
- for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
- (operation != NULL) && (value == NULL);
- operation = pcmk__xe_next(operation)) {
+ /* @TODO This does not consider promote options set in a meta-attribute
+ * block (which may have rules that need to be evaluated) rather than
+ * XML properties.
+ */
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
+ const char *promote_on_fail = NULL;
- if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ /* We only care about explicit on-fail (if promote uses default, so
+ * can demote)
+ */
+ promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ if (promote_on_fail == NULL) {
continue;
}
+
+ // We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
- on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!on_fail) {
- continue;
- } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
- } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S,
- NULL)) {
+ }
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
- } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ }
+
+ // We only care about enabled monitors
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
continue;
- } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ }
+
+ // Demote actions can't default to on-fail="demote"
+ if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
- value = on_fail;
+ // Use value from first applicable promote action found
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup(promote_on_fail);
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
}
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
- value = "ignore";
+ return;
+ }
- } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- name = crm_element_value(action->op_entry, "name");
- role = crm_element_value(action->op_entry, "role");
- interval_spec = crm_element_value(action->op_entry,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
+ && !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup("ignore");
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return;
+ }
+
+ // on-fail="demote" is allowed only for certain actions
+ if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action_config, "name");
+ role = crm_element_value(action_config, "role");
+ interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
- && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)
+ if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
- "allowed for it", action->rsc->id, name);
- return NULL;
+ "allowed for it", rsc->id, name);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
}
}
-
- return value;
}
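A minimal sketch of the reset behaviour, illustrative only, assuming rsc and a meta table created with pcmk__strkey_table(free, free) are in scope: on-fail="demote" is not valid for a stop action, so the entry is removed and callers fall back to the default failure handling.

    g_hash_table_insert(meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("demote"));
    validate_on_fail(rsc, PCMK_ACTION_STOP, NULL, meta);

    // The invalid value was dropped (and a configuration error logged)
    CRM_ASSERT(g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL) == NULL);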
static int
@@ -510,7 +557,7 @@ unpack_timeout(const char *value)
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
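An illustrative sketch, assuming crm_get_msec() treats a NULL or unparseable specification as negative: valid interval specifications such as "30s" are converted to milliseconds, while anything else now falls back to the PCMK_DEFAULT_ACTION_TIMEOUT_MS constant instead of re-parsing the old string default.

    int explicit_timeout = unpack_timeout("30s"); // 30000
    int default_timeout = unpack_timeout(NULL);   // PCMK_DEFAULT_ACTION_TIMEOUT_MS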
@@ -579,346 +626,475 @@ unpack_start_delay(const char *value, GHashTable *meta)
return start_delay;
}
+/*!
+ * \internal
+ * \brief Find a resource's most frequent recurring monitor
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Operation XML configured for most frequent recurring monitor for
+ * \p rsc (if any)
+ */
static xmlNode *
-find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
+most_frequent_monitor(const pcmk_resource_t *rsc)
{
- guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
- const char *name = NULL;
- const char *interval_spec = NULL;
xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- for (operation = pcmk__xe_first_child(rsc->ops_xml);
- operation != NULL;
- operation = pcmk__xe_next(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
-
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
+ bool enabled = false;
+ guint interval_ms = 0;
+ const char *interval_spec = crm_element_value(operation,
+ XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
- continue;
- }
+ // We only care about enabled recurring monitors
+ if (!pcmk__str_eq(crm_element_value(operation, "name"),
+ PCMK_ACTION_MONITOR, pcmk__str_none)) {
+ continue;
+ }
+ interval_ms = crm_parse_interval_spec(interval_spec);
+ if (interval_ms == 0) {
+ continue;
+ }
- interval_ms = crm_parse_interval_spec(interval_spec);
+ // @TODO This does not account for rules, defaults, etc.
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
+ }
- if (interval_ms && (interval_ms < min_interval_ms)) {
- min_interval_ms = interval_ms;
- op = operation;
- }
+ if (interval_ms < min_interval_ms) {
+ min_interval_ms = interval_ms;
+ op = operation;
}
}
-
return op;
}
/*!
- * \brief Unpack operation XML into an action structure
+ * \internal
+ * \brief Unpack action meta-attributes
*
- * Unpack an operation's meta-attributes (normalizing the interval, timeout,
- * and start delay values as integer milliseconds), requirements, and
- * failure policy.
+ * \param[in,out] rsc Resource that action is for
+ * \param[in] node Node that action is on
+ * \param[in] action_name Action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] action_config Action XML configuration from CIB (if any)
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in,out] data_set Cluster state
- * \param[in] interval_ms How frequently to perform the operation
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds) from its CIB XML
+ * configuration (including defaults).
+ *
+ * \return Newly allocated hash table with normalized action meta-attributes
*/
-static void
-unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms)
+GHashTable *
+pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config)
{
- int timeout_ms = 0;
- const char *value = NULL;
- bool is_probe = false;
+ GHashTable *meta = NULL;
+ char *name = NULL;
+ char *value = NULL;
+ const char *timeout_spec = NULL;
+ const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
- .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
- .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
- .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
- .op_name = action->task,
- .interval = interval_ms
+ .op_name = action_name,
+ .interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .node_hash = (node == NULL)? NULL : node->details->attrs,
+ .role = pcmk_role_unknown,
+ .now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
- .op_data = &op_rule_data
+ .op_data = &op_rule_data,
};
- CRM_CHECK(action && action->rsc, return);
-
- is_probe = pcmk_is_probe(action->task, interval_ms);
+ meta = pcmk__strkey_table(free, free);
// Cluster-wide <op_defaults> <meta_attributes>
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, FALSE, data_set);
-
- // Determine probe default timeout differently
- if (is_probe) {
- xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
-
- if (min_interval_mon) {
- value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
- if (value) {
- crm_trace("\t%s: Setting default timeout to minimum-interval "
- "monitor's timeout '%s'", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
+ &rule_data, meta, NULL, FALSE, rsc->cluster);
+
+ // Derive default timeout for probes from recurring monitor timeouts
+ if (pcmk_is_probe(action_name, interval_ms)) {
+ xmlNode *min_interval_mon = most_frequent_monitor(rsc);
+
+ if (min_interval_mon != NULL) {
+ /* @TODO This does not consider timeouts set in meta_attributes
+ * blocks (which may also have rules that need to be evaluated).
+ */
+ timeout_spec = crm_element_value(min_interval_mon,
+ XML_ATTR_TIMEOUT);
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting default timeout for %s probe to "
+ "most frequent monitor's timeout '%s'",
+ rsc->id, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
}
- if (xml_obj) {
- xmlAttrPtr xIter = NULL;
-
+ if (action_config != NULL) {
// <op> <meta_attributes> take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE, data_set);
+ pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
+ meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
+ * (See below for the only exception, fence device start/probe timeout.)
*/
- for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
- const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_obj, prop_name);
+ for (xmlAttrPtr attr = action_config->properties;
+ attr != NULL; attr = attr->next) {
+ name = strdup((const char *) attr->name);
+ value = strdup(pcmk__xml_attr_value(attr));
- g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
- g_hash_table_remove(action->meta, "id");
+ g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
- g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
- crm_strdup_printf("%u", interval_ms));
+ name = strdup(XML_LRM_ATTR_INTERVAL);
+ CRM_ASSERT(name != NULL);
+ value = crm_strdup_printf("%u", interval_ms);
+ g_hash_table_insert(meta, name, value);
} else {
- g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
- }
-
- /*
- * Timeout order of precedence:
- * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
- * and task is start or a probe; pcmk_monitor_timeout works
- * by default for a recurring monitor)
- * 2. explicit op timeout on the primitive
- * 3. default op timeout
- * a. if probe, then min-interval monitor's timeout
- * b. else, in XML_CIB_TAG_OPCONFIG
- * 4. CRM_DEFAULT_OP_TIMEOUT_S
- *
- * #1 overrides general rule of <op> XML property having highest
- * precedence.
+ g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /* Timeout order of precedence (highest to lowest):
+ * 1. pcmk_monitor_timeout resource parameter (only for starts and probes
+ * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
+ * monitors via the executor instead)
+ * 2. timeout configured in <op> (with <op timeout> taking precedence over
+ * <op> <meta_attributes>)
+ * 3. timeout configured in <op_defaults> <meta_attributes>
+ * 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*/
+
+ // Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
- && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- || is_probe)) {
-
- GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+ && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
+ || pcmk_is_probe(action_name, interval_ms))) {
+
+ GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
+
+ timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting timeout for %s %s to "
+ "pcmk_monitor_timeout (%s)",
+ rsc->id, action_name, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
+ }
+ }
- value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ // Normalize timeout to positive milliseconds
+ name = strdup(XML_ATTR_TIMEOUT);
+ CRM_ASSERT(name != NULL);
+ timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
+
+ // Ensure on-fail has a valid value
+ validate_on_fail(rsc, action_name, action_config, meta);
+
+ // Normalize start-delay
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
+ if (str != NULL) {
+ unpack_start_delay(str, meta);
+ } else {
+ long long start_delay = 0;
- if (value) {
- crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
- "overriding default", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(str, action_config, interval_ms,
+ rsc->cluster->now, &start_delay)) {
+ name = strdup(XML_OP_ATTR_START_DELAY);
+ CRM_ASSERT(name != NULL);
+ g_hash_table_insert(meta, name,
+ crm_strdup_printf("%lld", start_delay));
}
}
+ return meta;
+}
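A usage sketch for the new helper, with rsc, node, and an optional action_config XML node assumed to be in scope; an interval of 0 denotes a non-recurring start. The returned table always carries a timeout normalized to milliseconds.

    GHashTable *meta = pcmk__unpack_action_meta(rsc, node, PCMK_ACTION_START,
                                                0, action_config);
    const char *timeout_ms = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);

    pe_rsc_trace(rsc, "Effective start timeout for %s: %sms",
                 rsc->id, timeout_ms);
    g_hash_table_destroy(meta);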
- // Normalize timeout to positive milliseconds
- value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
- timeout_ms = unpack_timeout(value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- pcmk__itoa(timeout_ms));
+/*!
+ * \internal
+ * \brief Determine an action's quorum and fencing dependency
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action being unpacked
+ *
+ * \return Quorum and fencing dependency appropriate to action
+ */
+enum rsc_start_requirement
+pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
+{
+ const char *value = NULL;
+ enum rsc_start_requirement requires = pcmk_requires_nothing;
+
+ CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
- if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
- action->needs = rsc_req_nothing;
+ if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
value = "nothing (not start or promote)";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
- action->needs = rsc_req_stonith;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
+ requires = pcmk_requires_fencing;
value = "fencing";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
- action->needs = rsc_req_quorum;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_quorum)) {
+ requires = pcmk_requires_quorum;
value = "quorum";
} else {
- action->needs = rsc_req_nothing;
value = "nothing";
}
- pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
+ pe_rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
+ return requires;
+}
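A brief sketch of the mapping, illustrative only, with rsc assumed in scope: only start and promote consult the resource's needs-fencing and needs-quorum flags, so every other action reports pcmk_requires_nothing.

    enum rsc_start_requirement start_req =
        pcmk__action_requires(rsc, PCMK_ACTION_START);   // depends on rsc flags

    enum rsc_start_requirement monitor_req =
        pcmk__action_requires(rsc, PCMK_ACTION_MONITOR); // not start/promote

    CRM_ASSERT(monitor_req == pcmk_requires_nothing);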
- value = unpack_operation_on_fail(action);
+/*!
+ * \internal
+ * \brief Parse action failure response from a user-provided string
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] value User-provided configuration value for on-fail
+ *
+ * \return Action failure response parsed from \p text
+ */
+enum action_fail_response
+pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, const char *value)
+{
+ const char *desc = NULL;
+ bool needs_remote_reset = false;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
if (value == NULL) {
+ // Use default
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- action->on_fail = action_fail_block;
- g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
- value = "block"; // The above could destroy the original string
+ on_fail = pcmk_on_fail_block;
+ desc = "block";
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
- action->on_fail = action_fail_fence;
- value = "node fencing";
-
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "node fencing";
+ } else {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
- "operation '%s' to 'stop' because 'fence' is not "
- "valid when fencing is disabled", action->uuid);
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ "%s of %s to 'stop' because 'fence' is not "
+ "valid when fencing is disabled",
+ action_name, rsc->id);
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
- action->on_fail = action_fail_standby;
- value = "node standby";
+ on_fail = pcmk_on_fail_standby_node;
+ desc = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
- action->on_fail = action_fail_ignore;
- value = "ignore";
+ desc = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
- action->on_fail = action_fail_migrate;
- value = "force migration";
+ on_fail = pcmk_on_fail_ban;
+ desc = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate)";
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
- if (container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate)";
-
+ if (rsc->container == NULL) {
+ pe_rsc_debug(rsc,
+ "Using default " XML_OP_ATTR_ON_FAIL
+ " for %s of %s because it does not have a container",
+ action_name, rsc->id);
} else {
- value = NULL;
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate)";
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- action->on_fail = action_fail_demote;
- value = "demote instance";
+ on_fail = pcmk_on_fail_demote;
+ desc = "demote instance";
} else {
- pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
- value = NULL;
+ pcmk__config_err("Using default '" XML_OP_ATTR_ON_FAIL "' for "
+ "%s of %s because '%s' is not valid",
+ action_name, rsc->id, value);
}
- /* defaults */
- if (value == NULL && container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate) (default)";
+ /* Remote node connections are handled specially. Failures that result
+ * in dropping an active connection must result in fencing. The only
+ * failures that don't are probes and starts. The user can explicitly set
+ * on-fail="fence" to fence after start failures.
+ */
+ if (pe__resource_is_remote_conn(rsc)
+ && !pcmk_is_probe(action_name, interval_ms)
+ && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
+ needs_remote_reset = true;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ desc = NULL; // Force default for unmanaged connections
+ }
+ }
- /* For remote nodes, ensure that any failure that results in dropping an
- * active connection to the node results in fencing of the node.
- *
- * There are only two action failures that don't result in fencing.
- * 1. probes - probe failures are expected.
- * 2. start - a start failure indicates that an active connection does not already
- * exist. The user can set op on-fail=fence if they really want to fence start
- * failures. */
- } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
- && pe__resource_is_remote_conn(action->rsc, data_set)
- && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
- && (interval_ms == 0))
- && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
-
- if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop unmanaged remote node (enforcing default)";
+ if (desc != NULL) {
+ // Explicit value used, default not needed
- } else {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- value = "fence remote node (default)";
- } else {
- value = "recover remote node connection (default)";
- }
+ } else if (rsc->container != NULL) {
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate) (default)";
- if (action->rsc->remote_reconnect_ms) {
- action->fail_role = RSC_ROLE_STOPPED;
+ } else if (needs_remote_reset) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
+ desc = "fence remote node (default)";
+ } else {
+ desc = "recover remote node connection (default)";
}
- action->on_fail = action_fail_reset_remote;
+ on_fail = pcmk_on_fail_reset_remote;
+ } else {
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop unmanaged remote node (enforcing default)";
}
- } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- action->on_fail = action_fail_fence;
- value = "resource fence (default)";
-
+ } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "resource fence (default)";
} else {
- action->on_fail = action_fail_block;
- value = "resource block (default)";
+ on_fail = pcmk_on_fail_block;
+ desc = "resource block (default)";
}
- } else if (value == NULL) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate) (default)";
+ } else {
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate) (default)";
}
- pe_rsc_trace(action->rsc, "%s failure handling: %s",
- action->uuid, value);
+ pe_rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
+ pcmk__readable_interval(interval_ms), action_name,
+ rsc->id, desc);
+ return on_fail;
+}
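A usage sketch, assuming rsc is an ordinary resource (not a remote connection and without a container): an explicit value is mapped directly, while NULL selects the per-action default.

    // Explicit on-fail="block" for a 10s monitor
    enum action_fail_response explicit_policy =
        pcmk__parse_on_fail(rsc, PCMK_ACTION_MONITOR, 10000, "block");
    CRM_ASSERT(explicit_policy == pcmk_on_fail_block);

    // No configured value: such a resource's monitor defaults to restart
    enum action_fail_response default_policy =
        pcmk__parse_on_fail(rsc, PCMK_ACTION_MONITOR, 10000, NULL);
    CRM_ASSERT(default_policy == pcmk_on_fail_restart);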
- value = NULL;
- if (xml_obj != NULL) {
- value = g_hash_table_lookup(action->meta, "role_after_failure");
- if (value) {
- pe_warn_once(pe_wo_role_after,
- "Support for role_after_failure is deprecated and will be removed in a future release");
- }
+/*!
+ * \internal
+ * \brief Determine a resource's role after failure of an action
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] on_fail Failure handling for action
+ * \param[in] meta Unpacked action meta-attributes
+ *
+ * \return Resource role that results from failure of action
+ */
+enum rsc_role_e
+pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
+ enum action_fail_response on_fail, GHashTable *meta)
+{
+ const char *value = NULL;
+ enum rsc_role_e role = pcmk_role_unknown;
+
+ // Set default for role after failure specially in certain circumstances
+ switch (on_fail) {
+ case pcmk_on_fail_stop:
+ role = pcmk_role_stopped;
+ break;
+
+ case pcmk_on_fail_reset_remote:
+ if (rsc->remote_reconnect_ms != 0) {
+ role = pcmk_role_stopped;
+ }
+ break;
+
+ default:
+ break;
}
- if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
- action->fail_role = text2role(value);
+
+ // @COMPAT Check for explicitly configured role (deprecated)
+ value = g_hash_table_lookup(meta, "role_after_failure");
+ if (value != NULL) {
+ pe_warn_once(pcmk__wo_role_after,
+ "Support for role_after_failure is deprecated "
+ "and will be removed in a future release");
+ if (role == pcmk_role_unknown) {
+ role = text2role(value);
+ }
}
- /* defaults */
- if (action->fail_role == RSC_ROLE_UNKNOWN) {
- if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- action->fail_role = RSC_ROLE_UNPROMOTED;
+
+ if (role == pcmk_role_unknown) {
+ // Use default
+ if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ role = pcmk_role_unpromoted;
} else {
- action->fail_role = RSC_ROLE_STARTED;
+ role = pcmk_role_started;
}
}
- pe_rsc_trace(action->rsc, "%s failure results in: %s",
- action->uuid, role2text(action->fail_role));
+ pe_rsc_trace(rsc, "Role after %s %s failure is: %s",
+ rsc->id, action_name, role2text(role));
+ return role;
+}
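A short sketch of the defaults, illustrative only, assuming rsc and an action meta table without the deprecated role_after_failure entry are in scope:

    // on-fail handling of "stop" leaves the resource Stopped ...
    enum rsc_role_e stop_role =
        pcmk__role_after_failure(rsc, PCMK_ACTION_STOP,
                                 pcmk_on_fail_stop, meta);
    CRM_ASSERT(stop_role == pcmk_role_stopped);

    // ... while a failed promote with default handling ends up Unpromoted
    enum rsc_role_e promote_role =
        pcmk__role_after_failure(rsc, PCMK_ACTION_PROMOTE,
                                 pcmk_on_fail_restart, meta);
    CRM_ASSERT(promote_role == pcmk_role_unpromoted);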
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
- if (value) {
- unpack_start_delay(value, action->meta);
- } else {
- long long start_delay = 0;
+/*!
+ * \internal
+ * \brief Unpack action configuration
+ *
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
+ *
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms)
+{
+ const char *value = NULL;
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
- &start_delay)) {
- g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
- crm_strdup_printf("%lld", start_delay));
- }
- }
+ action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
+ action->task, interval_ms, xml_obj);
+ action->needs = pcmk__action_requires(action->rsc, action->task);
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
+ interval_ms, value);
+
+ action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
+ action->on_fail, action->meta);
}
/*!
@@ -929,31 +1105,26 @@ unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] save_action Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
- * \return Action object corresponding to arguments
- * \note This function takes ownership of (and might free) \p key. If
- * \p save_action is true, \p data_set will own the returned action,
- * otherwise it is the caller's responsibility to free the return value
- * with pe_free_action().
+ * \return Action object corresponding to arguments (guaranteed not to be
+ * \c NULL)
+ * \note This function takes ownership of (and might free) \p key, and
+ * \p scheduler takes ownership of the returned action (the caller should
+ * not free it).
*/
-pe_action_t *
-custom_action(pe_resource_t *rsc, char *key, const char *task,
- const pe_node_t *on_node, gboolean optional, gboolean save_action,
- pe_working_set_t *data_set)
+pcmk_action_t *
+custom_action(pcmk_resource_t *rsc, char *key, const char *task,
+ const pcmk_node_t *on_node, gboolean optional,
+ pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
-
- if (save_action) {
- action = find_existing_action(key, rsc, on_node, data_set);
- }
+ CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
+ action = find_existing_action(key, rsc, on_node, scheduler);
if (action == NULL) {
- action = new_action(key, task, rsc, on_node, optional, save_action,
- data_set);
+ action = new_action(key, task, rsc, on_node, optional, scheduler);
} else {
free(key);
}
@@ -961,28 +1132,38 @@ custom_action(pe_resource_t *rsc, char *key, const char *task,
update_action_optional(action, optional);
if (rsc != NULL) {
- if (action->node != NULL) {
- unpack_action_node_attributes(action, data_set);
- }
+ if ((action->node != NULL) && (action->op_entry != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
- update_resource_action_runnable(action, save_action, data_set);
+ GHashTable *attrs = action->node->details->attrs;
- if (save_action) {
- update_resource_flags_for_action(rsc, action);
+ if (action->extra != NULL) {
+ g_hash_table_destroy(action->extra);
+ }
+ action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
+ attrs, scheduler);
+ pe__set_action_flags(action, pcmk_action_attrs_evaluated);
}
+
+ update_resource_action_runnable(action, scheduler);
+ update_resource_flags_for_action(rsc, action);
+ }
+
+ if (action->extra == NULL) {
+ action->extra = pcmk__strkey_table(free, free);
}
return action;
}
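With the save_action parameter gone, callers now pass six arguments and the scheduler always owns the result. A minimal sketch, with rsc, node, and scheduler assumed in scope, using pcmk__op_key() as seen elsewhere in this file:

    // Create (or reuse) a required start action for rsc on node
    pcmk_action_t *start =
        custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
                      PCMK_ACTION_START, node, FALSE, scheduler);

    // Do not free 'start'; it now belongs to scheduler->actions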
-pe_action_t *
-get_pseudo_op(const char *name, pe_working_set_t * data_set)
+pcmk_action_t *
+get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
{
- pe_action_t *op = lookup_singleton(data_set, name);
+ pcmk_action_t *op = lookup_singleton(scheduler, name);
if (op == NULL) {
- op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
}
return op;
}
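A sketch of the singleton behaviour; the pseudo-action name here is purely illustrative. Repeated calls with the same name return the same object rather than allocating a new one.

    pcmk_action_t *first = get_pseudo_op("example-pseudo-op", scheduler);
    pcmk_action_t *again = get_pseudo_op("example-pseudo-op", scheduler);

    CRM_ASSERT(first == again); // found via the scheduler's singletons table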
@@ -991,15 +1172,15 @@ static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *candidate = gIter->data;
+ pcmk_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
- } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
+ } else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
- } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
+ } else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
@@ -1013,8 +1194,8 @@ find_unfencing_devices(GList *candidates, GList *matches)
}
static int
-node_priority_fencing_delay(const pe_node_t *node,
- const pe_working_set_t *data_set)
+node_priority_fencing_delay(const pcmk_node_t *node,
+ const pcmk_scheduler_t *scheduler)
{
int member_count = 0;
int online_count = 0;
@@ -1023,13 +1204,13 @@ node_priority_fencing_delay(const pe_node_t *node,
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
- if (data_set->priority_fencing_delay <= 0) {
+ if (scheduler->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
- if (node->details->type != node_member) {
+ if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
@@ -1038,10 +1219,10 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = gIter->data;
+ for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *n = gIter->data;
- if (n->details->type != node_member) {
+ if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
@@ -1077,54 +1258,58 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- return data_set->priority_fencing_delay;
+ return scheduler->priority_fencing_delay;
}
-pe_action_t *
-pe_fence_op(pe_node_t *node, const char *op, bool optional,
- const char *reason, bool priority_delay, pe_working_set_t *data_set)
+pcmk_action_t *
+pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay,
+ pcmk_scheduler_t *scheduler)
{
char *op_key = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_action_t *stonith_op = NULL;
if(op == NULL) {
- op = data_set->stonith_action;
+ op = scheduler->stonith_action;
}
- op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
+ op_key = crm_strdup_printf("%s-%s-%s",
+ PCMK_ACTION_STONITH, node->details->uname, op);
- stonith_op = lookup_singleton(data_set, op_key);
+ stonith_op = lookup_singleton(scheduler, op_key);
if(stonith_op == NULL) {
- stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
+ stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
+ TRUE, scheduler);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
- if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
- GList *matches = find_unfencing_devices(data_set->resources, NULL);
+ GList *matches = find_unfencing_devices(scheduler->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *match = gIter->data;
+ pcmk_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
- data = pe__compare_fencing_digest(match, agent, node, data_set);
- if(data->rc == RSC_DIGEST_ALL) {
+ data = pe__compare_fencing_digest(match, agent, node,
+ scheduler);
+ if (data->rc == pcmk__digest_mismatch) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out,
"notice: Unfencing node %s because the "
@@ -1157,7 +1342,7 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
free(op_key);
}
- if (data_set->priority_fencing_delay > 0
+ if (scheduler->priority_fencing_delay > 0
/* It's a suitable case where `priority-fencing-delay` applies.
* At least add `priority-fencing-delay` field as an indicator. */
@@ -1174,15 +1359,16 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
* the targeting node. So that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
- char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
+ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
+ scheduler));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
- if(optional == FALSE && pe_can_fence(data_set, node)) {
- pe__clear_action_flags(stonith_op, pe_action_optional);
+ if(optional == FALSE && pe_can_fence(scheduler, node)) {
+ pe__clear_action_flags(stonith_op, pcmk_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
@@ -1193,13 +1379,13 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
}
void
-pe_free_action(pe_action_t * action)
+pe_free_action(pcmk_action_t *action)
{
if (action == NULL) {
return;
}
- g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
- g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
+ g_list_free_full(action->actions_before, free);
+ g_list_free_full(action->actions_after, free);
if (action->extra) {
g_hash_table_destroy(action->extra);
}
@@ -1215,7 +1401,8 @@ pe_free_action(pe_action_t * action)
}
int
-pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
+pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
@@ -1224,8 +1411,8 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -1240,10 +1427,11 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
}
- if (timeout_spec == NULL && data_set->op_defaults) {
+ if (timeout_spec == NULL && scheduler->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
- &rule_data, action_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->op_defaults, XML_TAG_META_SETS,
+ &rule_data, action_meta, NULL, FALSE,
+ scheduler);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
@@ -1252,7 +1440,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
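The timeout lookup in the hunks above falls back in three steps: the action's own configured timeout, then the op_defaults meta-attributes, then a fixed default whenever crm_get_msec() reports a negative (unparseable) value. A self-contained sketch of that fallback chain, using hypothetical names, a plain numeric parse in place of crm_get_msec(), and an assumed 20-second constant standing in for PCMK_DEFAULT_ACTION_TIMEOUT_MS:

    /* Fallback chain for an action timeout: the operation's own setting,
     * then an op-defaults value, then a fixed default when nothing parses.
     * Hypothetical names; a plain numeric parse stands in for crm_get_msec().
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_DEFAULT_TIMEOUT_MS 20000L   /* assumed default, for illustration */

    /* Return the parsed millisecond value, or -1 if spec is missing/invalid */
    static long parse_ms(const char *spec)
    {
        char *end = NULL;
        long value = 0;

        if (spec == NULL) {
            return -1;
        }
        value = strtol(spec, &end, 10);
        return ((end == spec) || (*end != '\0') || (value < 0))? -1 : value;
    }

    static long configured_timeout_ms(const char *op_timeout,
                                      const char *defaults_timeout)
    {
        /* Prefer the operation's own timeout, then the defaults */
        const char *spec = (op_timeout != NULL)? op_timeout : defaults_timeout;
        long ms = parse_ms(spec);

        return (ms < 0)? DEMO_DEFAULT_TIMEOUT_MS : ms;
    }

    int main(void)
    {
        printf("%ld\n", configured_timeout_ms(NULL, "90000"));  /* 90000 */
        printf("%ld\n", configured_timeout_ms("bogus", NULL));  /* 20000 */
        return 0;
    }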
@@ -1262,16 +1450,16 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
enum action_tasks
-get_complex_task(const pe_resource_t *rsc, const char *name)
+get_complex_task(const pcmk_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
- if ((rsc != NULL) && (rsc->variant == pe_native)) {
+ if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
- case stopped_rsc:
- case started_rsc:
- case action_demoted:
- case action_promoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_demoted:
+ case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
@@ -1294,14 +1482,14 @@ get_complex_task(const pe_resource_t *rsc, const char *name)
*
* \return First action in list that matches criteria, or NULL if none
*/
-pe_action_t *
+pcmk_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
- const pe_node_t *on_node)
+ const pcmk_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
@@ -1324,7 +1512,7 @@ find_first_action(const GList *input, const char *uuid, const char *task,
}
GList *
-find_actions(GList *input, const char *key, const pe_node_t *on_node)
+find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
@@ -1332,7 +1520,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
@@ -1358,7 +1546,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
}
GList *
-find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
+find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *result = NULL;
@@ -1369,7 +1557,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
@@ -1397,7 +1585,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
* without a node will be assigned to node.
*/
GList *
-pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
@@ -1423,16 +1611,18 @@ pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
* \note It is the caller's responsibility to free() the result.
*/
char *
-pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
+pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
- case pe_action_runnable:
- case pe_action_migrate_runnable:
+ case pcmk_action_runnable:
change = "unrunnable";
break;
- case pe_action_optional:
+ case pcmk_action_migratable:
+ change = "unmigrateable";
+ break;
+ case pcmk_action_optional:
change = "required";
break;
default:
@@ -1446,7 +1636,8 @@ pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
action->task);
}
-void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
+void pe_action_set_reason(pcmk_action_t *action, const char *reason,
+ bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
@@ -1468,20 +1659,14 @@ void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrit
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
- * \param[in,out] data_set Cluster working set
- *
- * \return New action to clear resource history
*/
-pe_action_t *
-pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+void
+pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- char *key = NULL;
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- CRM_ASSERT(rsc && node);
- key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
- return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
- data_set);
+ custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->cluster);
}
#define sort_return(an_int, why) do { \
@@ -1646,19 +1831,19 @@ sort_op_by_callid(gconstpointer a, gconstpointer b)
*
* \return New action object corresponding to arguments
*/
-pe_action_t *
-pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
+pcmk_action_t *
+pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
- optional, TRUE, rsc->cluster);
- pe__set_action_flags(action, pe_action_pseudo);
+ optional, rsc->cluster);
+ pe__set_action_flags(action, pcmk_action_pseudo);
if (runnable) {
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
return action;
}
@@ -1673,7 +1858,7 @@ pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
* \note This is more efficient than calling add_hash_param().
*/
void
-pe__add_action_expected_result(pe_action_t *action, int expected_result)
+pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
{
char *name = NULL;
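Throughout the pe_actions.c hunks above, behaviour is toggled through bit flags: pcmk_is_set() tests bits, while pe__set_action_flags() and pe__clear_action_flags() turn them on and off (for example, clearing pcmk_action_optional to make fencing mandatory). A standalone sketch of that idiom, with made-up flag names and simplified helpers rather than the real Pacemaker enums and macros:

    /* Simplified stand-ins for the flag helpers used above; made-up flag
     * names, not the real pcmk_action_* enum values.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum demo_action_flags {
        demo_action_pseudo   = (1 << 0),
        demo_action_runnable = (1 << 1),
        demo_action_optional = (1 << 2),
    };

    /* True only if every bit in 'bits' is present in 'flags' */
    static bool all_set(uint64_t flags, uint64_t bits)
    {
        return (flags & bits) == bits;
    }

    int main(void)
    {
        uint64_t flags = demo_action_optional;

        flags |= demo_action_pseudo|demo_action_runnable;  /* "set" helpers */
        flags &= ~(uint64_t) demo_action_optional;         /* "clear" helpers */

        printf("runnable: %s\n", all_set(flags, demo_action_runnable)? "yes" : "no");
        printf("optional: %s\n", all_set(flags, demo_action_optional)? "yes" : "no");
        return 0;
    }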
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index b8047da..546a2a7 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -93,27 +93,27 @@ attr_in_string(xmlAttrPtr a, void *user_data)
* \param[in] xml_op Unused
* \param[in] op_version CRM feature set to use for digest calculation
* \param[in] overrides Key/value table to override resource parameters
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *params,
+calculate_main_digest(op_digest_cache_t *data, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *params,
const char *task, guint *interval_ms,
const xmlNode *xml_op, const char *op_version,
- GHashTable *overrides, pe_working_set_t *data_set)
+ GHashTable *overrides, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ xmlNode *action_config = NULL;
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
/* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
* that themselves are Pacemaker Remote nodes
*/
- (void) pe__add_bundle_remote_name(rsc, data_set, data->params_all,
+ (void) pe__add_bundle_remote_name(rsc, scheduler, data->params_all,
XML_RSC_ATTR_REMOTE_RA_ADDR);
- // If interval was overridden, reset it
if (overrides != NULL) {
+ // If interval was overridden, reset it
const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_"
XML_LRM_ATTR_INTERVAL);
@@ -125,34 +125,42 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
*interval_ms = (guint) value_ll;
}
}
- }
- action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms),
- task, node, TRUE, FALSE, data_set);
- if (overrides != NULL) {
+ // Add overrides to list of all parameters
g_hash_table_foreach(overrides, hash2field, data->params_all);
}
- g_hash_table_foreach(params, hash2field, data->params_all);
- g_hash_table_foreach(action->extra, hash2field, data->params_all);
- g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
- pcmk__filter_op_for_digest(data->params_all);
+ // Add provided instance parameters
+ g_hash_table_foreach(params, hash2field, data->params_all);
- /* Given a non-recurring operation with extra parameters configured,
- * in case that the main digest doesn't match, even if the restart
- * digest matches, enforce a restart rather than a reload-agent anyway.
- * So that it ensures any changes of the extra parameters get applied
- * for this specific operation, and the digests calculated for the
- * resulting lrm_rsc_op will be correct.
- * Mark the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ // Find action configuration XML in CIB
+ action_config = pcmk__find_action_config(rsc, task, *interval_ms, true);
+
+ /* Add action-specific resource instance attributes to the digest list.
+ *
+ * If this is a one-time action with action-specific instance attributes,
+ * enforce a restart instead of reload-agent in case the main digest doesn't
+ * match, even if the restart digest does. This ensures any changes of the
+ * action-specific parameters get applied for this specific action, and
+ * digests calculated for the resulting history will be correct. Default the
+ * result to RSC_DIGEST_RESTART for the case where the main digest doesn't
+ * match.
*/
- if (*interval_ms == 0
- && g_hash_table_size(action->extra) > 0) {
- data->rc = RSC_DIGEST_RESTART;
+ params = pcmk__unpack_action_rsc_params(action_config, node->details->attrs,
+ scheduler);
+ if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) {
+ data->rc = pcmk__digest_restart;
}
+ g_hash_table_foreach(params, hash2field, data->params_all);
+ g_hash_table_destroy(params);
+
+ // Add action meta-attributes
+ params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms,
+ action_config);
+ g_hash_table_foreach(params, hash2metafield, data->params_all);
+ g_hash_table_destroy(params);
- pe_free_action(action);
+ pcmk__filter_op_for_digest(data->params_all);
data->digest_all_calc = calculate_operation_digest(data->params_all,
op_version);
@@ -177,7 +185,7 @@ is_fence_param(xmlAttrPtr attr, void *user_data)
* \param[in] overrides Key/value hash table to override resource parameters
*/
static void
-calculate_secure_digest(op_digest_cache_t *data, const pe_resource_t *rsc,
+calculate_secure_digest(op_digest_cache_t *data, const pcmk_resource_t *rsc,
GHashTable *params, const xmlNode *xml_op,
const char *op_version, GHashTable *overrides)
{
@@ -288,17 +296,17 @@ calculate_restart_digest(op_digest_cache_t *data, const xmlNode *xml_op,
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] overrides Key/value table to override resource parameters
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to new digest cache entry (or NULL on memory error)
* \note It is the caller's responsibility to free the result using
* pe__free_digests().
*/
op_digest_cache_t *
-pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
- const pe_node_t *node, const xmlNode *xml_op,
- GHashTable *overrides, bool calc_secure,
- pe_working_set_t *data_set)
+pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
+ guint *interval_ms, const pcmk_node_t *node,
+ const xmlNode *xml_op, GHashTable *overrides,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t));
const char *op_version = NULL;
@@ -308,23 +316,23 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
return NULL;
}
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
if (xml_op != NULL) {
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
}
- if (op_version == NULL && data_set != NULL && data_set->input != NULL) {
- op_version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ if (op_version == NULL && scheduler != NULL && scheduler->input != NULL) {
+ op_version = crm_element_value(scheduler->input, XML_ATTR_CRM_VERSION);
}
if (op_version == NULL) {
op_version = CRM_FEATURE_SET;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
- op_version, overrides, data_set);
+ op_version, overrides, scheduler);
if (calc_secure) {
calculate_secure_digest(data, rsc, params, xml_op, op_version,
overrides);
@@ -343,14 +351,14 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
* \param[in,out] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry
*/
static op_digest_cache_t *
-rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
- pe_node_t *node, const xmlNode *xml_op,
- bool calc_secure, pe_working_set_t *data_set)
+rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms,
+ pcmk_node_t *node, const xmlNode *xml_op,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
char *key = pcmk__op_key(rsc->id, task, interval_ms);
@@ -358,7 +366,7 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
- NULL, calc_secure, data_set);
+ NULL, calc_secure, scheduler);
CRM_ASSERT(data != NULL);
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
@@ -370,16 +378,16 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
* \internal
* \brief Calculate operation digests and compare against an XML history entry
*
- * \param[in,out] rsc Resource to check
- * \param[in] xml_op Resource history XML
- * \param[in,out] node Node to use for digest calculation
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to check
+ * \param[in] xml_op Resource history XML
+ * \param[in,out] node Node to use for digest calculation
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry, with comparison result set
*/
op_digest_cache_t *
-rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
- pe_node_t *node, pe_working_set_t *data_set)
+rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
guint interval_ms = 0;
@@ -397,8 +405,9 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
- pcmk_is_set(data_set->flags, pe_flag_sanitized),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_sanitized),
+ scheduler);
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
@@ -408,11 +417,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
data->digest_restart_calc,
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_RESTART;
+ data->rc = pcmk__digest_restart;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
/* Given a non-recurring operation with extra parameters configured,
@@ -421,11 +430,10 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
* So that it ensures any changes of the extra parameters get applied
* for this specific operation, and the digests calculated for the
* resulting lrm_rsc_op will be correct.
- * Preserve the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ * Preserve the implied rc pcmk__digest_restart for the case that the
+ * main digest doesn't match.
*/
- if (interval_ms == 0
- && data->rc == RSC_DIGEST_RESTART) {
+ if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) {
pe_rsc_info(rsc, "Parameters containing extra ones to %ums-interval"
" %s action for %s on %s "
"changed: hash was %s vs. now %s (restart:%s) %s",
@@ -442,11 +450,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
(interval_ms > 0)? "reschedule" : "reload",
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_ALL;
+ data->rc = pcmk__digest_mismatch;
}
} else {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
}
return data;
}
@@ -522,34 +530,34 @@ unfencing_digest_matches(const char *rsc_id, const char *agent,
* \internal
* \brief Calculate fence device digests and digest comparison result
*
- * \param[in,out] rsc Fence device resource
- * \param[in] agent Fence device's agent type
- * \param[in,out] node Node with digest cache to use
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Fence device resource
+ * \param[in] agent Fence device's agent type
+ * \param[in,out] node Node with digest cache to use
+ * \param[in,out] scheduler Scheduler data
*
* \return Node's digest cache entry
*/
op_digest_cache_t *
-pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
- pe_node_t *node, pe_working_set_t *data_set)
+pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
- node, NULL, TRUE, data_set);
+ node, NULL, TRUE, scheduler);
// Check whether node has special unfencing summary node attribute
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
if (node_summary == NULL) {
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
return data;
}
@@ -557,9 +565,9 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_match;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out, "Only 'private' parameters to %s "
"for unfencing %s changed", rsc->id,
pe__node_name(node));
@@ -568,10 +576,12 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
}
// Parameters don't match
- data->rc = RSC_DIGEST_ALL;
- if (pcmk_is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) {
- if (data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_mismatch;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)
+ && (data->digest_secure_calc != NULL)) {
+
+ if (scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
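The pe_digest.c changes rename the comparison results to pcmk__digest_match, pcmk__digest_restart, pcmk__digest_mismatch and pcmk__digest_unknown, and rsc_action_digest_cmp() picks between them in that order: a changed restart digest wins, a missing recorded digest means unknown, and any other difference is a mismatch. A minimal sketch of that decision order, with plain strings standing in for the calculated digests and local enum names in place of the real ones:

    /* The four comparison outcomes and the order they are chosen in,
     * mirroring rsc_action_digest_cmp() above; plain strings stand in
     * for the calculated digests.
     */
    #include <stdio.h>
    #include <string.h>

    enum demo_digest_rc {
        demo_digest_match,      /* no parameter change */
        demo_digest_restart,    /* restart-sensitive parameters changed */
        demo_digest_mismatch,   /* other parameters changed */
        demo_digest_unknown,    /* nothing recorded to compare against */
    };

    static enum demo_digest_rc
    compare_digests(const char *recorded_all, const char *calc_all,
                    const char *recorded_restart, const char *calc_restart)
    {
        if ((recorded_restart != NULL) && (calc_restart != NULL)
            && (strcmp(recorded_restart, calc_restart) != 0)) {
            return demo_digest_restart;     /* restart digest wins */
        }
        if (recorded_all == NULL) {
            return demo_digest_unknown;     /* no history to compare */
        }
        if (strcmp(recorded_all, calc_all) != 0) {
            return demo_digest_mismatch;
        }
        return demo_digest_match;
    }

    int main(void)
    {
        printf("%d\n", compare_digests("abc", "abc", "r1", "r1"));  /* 0: match */
        printf("%d\n", compare_digests("abc", "xyz", "r1", "r1"));  /* 2: mismatch */
        return 0;
    }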
diff --git a/lib/pengine/pe_health.c b/lib/pengine/pe_health.c
index 6419fdf..93028ae 100644
--- a/lib/pengine/pe_health.c
+++ b/lib/pengine/pe_health.c
@@ -17,12 +17,12 @@
* \internal
* \brief Set the node health values to use for "red", "yellow", and "green"
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__unpack_node_health_scores(pe_working_set_t *data_set)
+pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler)
{
- switch (pe__health_strategy(data_set)) {
+ switch (pe__health_strategy(scheduler)) {
case pcmk__health_strategy_none:
pcmk__score_red = 0;
pcmk__score_yellow = 0;
@@ -43,11 +43,11 @@ pe__unpack_node_health_scores(pe_working_set_t *data_set)
default: // progressive or custom
pcmk__score_red = pe__health_score(PCMK__OPT_NODE_HEALTH_RED,
- data_set);
+ scheduler);
pcmk__score_green = pe__health_score(PCMK__OPT_NODE_HEALTH_GREEN,
- data_set);
+ scheduler);
pcmk__score_yellow = pe__health_score(PCMK__OPT_NODE_HEALTH_YELLOW,
- data_set);
+ scheduler);
break;
}
@@ -93,7 +93,7 @@ add_node_health_value(gpointer key, gpointer value, gpointer user_data)
* \return Sum of all health attribute scores of \p node plus \p base_health
*/
int
-pe__sum_node_health_scores(const pe_node_t *node, int base_health)
+pe__sum_node_health_scores(const pcmk_node_t *node, int base_health)
{
CRM_ASSERT(node != NULL);
g_hash_table_foreach(node->details->attrs, add_node_health_value,
@@ -111,7 +111,7 @@ pe__sum_node_health_scores(const pe_node_t *node, int base_health)
* otherwise 0 if any attribute is yellow, otherwise a positive value.
*/
int
-pe__node_health(pe_node_t *node)
+pe__node_health(pcmk_node_t *node)
{
GHashTableIter iter;
const char *name = NULL;
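In pe_health.c above, pe__unpack_node_health_scores() configures the numeric scores used for red, yellow and green (all zero for the "none" strategy, option-driven otherwise), and pe__sum_node_health_scores() folds each node-health attribute into a per-node total. A rough, self-contained sketch of that accumulation, assuming the textual values red/yellow/green map onto those configured scores (that mapping itself is outside this excerpt) and using illustrative numbers:

    /* Accumulating a node health total from per-attribute values; the
     * red/yellow/green numbers here are illustrative placeholders for the
     * option-driven scores set up by pe__unpack_node_health_scores().
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const int score_red = -1000000;  /* e.g. "never run here" */
    static const int score_yellow = 0;
    static const int score_green = 0;

    static int health_value(const char *value)
    {
        if (strcmp(value, "red") == 0) {
            return score_red;
        }
        if (strcmp(value, "yellow") == 0) {
            return score_yellow;
        }
        if (strcmp(value, "green") == 0) {
            return score_green;
        }
        return atoi(value);     /* numeric scores contribute as-is */
    }

    int main(void)
    {
        /* A few hypothetical #health-* node attributes */
        const char *attrs[][2] = {
            { "#health-disk", "green"  },
            { "#health-cpu",  "yellow" },
            { "#health-net",  "-20"    },
        };
        int sum = 0;

        for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
            sum += health_value(attrs[i][1]);
        }
        printf("node health score: %d\n", sum);   /* -20 */
        return 0;
    }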
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
index 7ed490f..0e1e239 100644
--- a/lib/pengine/pe_notif.c
+++ b/lib/pengine/pe_notif.c
@@ -9,13 +9,15 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
+
+#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
#include "pe_status_private.h"
typedef struct notify_entry_s {
- const pe_resource_t *rsc;
- const pe_node_t *node;
+ const pcmk_resource_t *rsc;
+ const pcmk_node_t *node;
} notify_entry_t;
/*!
@@ -105,7 +107,7 @@ dup_notify_entry(const notify_entry_t *entry)
* \internal
* \brief Given a list of nodes, create strings with node names
*
- * \param[in] list List of nodes (as pe_node_t *)
+ * \param[in] list List of nodes (as pcmk_node_t *)
* \param[out] all_node_names If not NULL, will be set to space-separated list
* of the names of all nodes in \p list
* \param[out] host_node_names Same as \p all_node_names, except active
@@ -126,7 +128,7 @@ get_node_names(const GList *list, GString **all_node_names,
}
for (const GList *iter = list; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
if (node->details->uname == NULL) {
continue;
@@ -242,7 +244,7 @@ notify_entries_to_strings(GList *list, GString **rsc_names,
static void
copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
{
- pe_action_t *notify = (pe_action_t *) user_data;
+ pcmk_action_t *notify = (pcmk_action_t *) user_data;
/* Any existing meta-attributes (for example, the action timeout) are for
* the notify action itself, so don't override those.
@@ -256,7 +258,8 @@ copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
}
static void
-add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
+add_notify_data_to_action_meta(const notify_data_t *n_data,
+ pcmk_action_t *action)
{
for (const GSList *item = n_data->keys; item; item = item->next) {
const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
@@ -271,23 +274,23 @@ add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
*
* \param[in,out] rsc Clone resource that notification is for
* \param[in] action Action to use in notify action key
- * \param[in] notif_action RSC_NOTIFY or RSC_NOTIFIED
+ * \param[in] notif_action PCMK_ACTION_NOTIFY or PCMK_ACTION_NOTIFIED
* \param[in] notif_type "pre", "post", "confirmed-pre", "confirmed-post"
*
* \return Newly created notify pseudo-action
*/
-static pe_action_t *
-new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
+static pcmk_action_t *
+new_notify_pseudo_action(pcmk_resource_t *rsc, const pcmk_action_t *action,
const char *notif_action, const char *notif_type)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
notify = custom_action(rsc,
pcmk__notify_key(rsc->id, notif_type, action->task),
notif_action, NULL,
- pcmk_is_set(action->flags, pe_action_optional),
- TRUE, rsc->cluster);
- pe__set_action_flags(notify, pe_action_pseudo);
+ pcmk_is_set(action->flags, pcmk_action_optional),
+ rsc->cluster);
+ pe__set_action_flags(notify, pcmk_action_pseudo);
add_hash_param(notify->meta, "notify_key_type", notif_type);
add_hash_param(notify->meta, "notify_key_operation", action->task);
return notify;
@@ -305,12 +308,13 @@ new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
*
* \return Newly created notify action
*/
-static pe_action_t *
-new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
- pe_action_t *notify_done, const notify_data_t *n_data)
+static pcmk_action_t *
+new_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *op, pcmk_action_t *notify_done,
+ const notify_data_t *n_data)
{
char *key = NULL;
- pe_action_t *notify_action = NULL;
+ pcmk_action_t *notify_action = NULL;
const char *value = NULL;
const char *task = NULL;
const char *skip_reason = NULL;
@@ -324,7 +328,7 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
skip_reason = "no parent notification";
} else if (!node->details->online) {
skip_reason = "node offline";
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
skip_reason = "original action not runnable";
}
if (skip_reason != NULL) {
@@ -342,16 +346,16 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
// Create the notify action
key = pcmk__notify_key(rsc->id, value, task);
notify_action = custom_action(rsc, key, op->task, node,
- pcmk_is_set(op->flags, pe_action_optional),
- TRUE, rsc->cluster);
+ pcmk_is_set(op->flags, pcmk_action_optional),
+ rsc->cluster);
// Add meta-data to notify action
g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
add_notify_data_to_action_meta(n_data, notify_action);
// Order notify after original action and before parent notification
- order_actions(op, notify_action, pe_order_optional);
- order_actions(notify_action, notify_done, pe_order_optional);
+ order_actions(op, notify_action, pcmk__ar_ordered);
+ order_actions(notify_action, notify_done, pcmk__ar_ordered);
return notify_action;
}
@@ -364,10 +368,10 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
* \param[in,out] n_data Notification values to add to action meta-data
*/
static void
-new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
+new_post_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
notify_data_t *n_data)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
CRM_ASSERT(n_data != NULL);
@@ -383,16 +387,16 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
return;
}
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *mon = (pe_action_t *) iter->data;
+ pcmk_action_t *mon = (pcmk_action_t *) iter->data;
const char *interval_ms_s = NULL;
interval_ms_s = g_hash_table_lookup(mon->meta,
XML_LRM_ATTR_INTERVAL_MS);
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
- || pcmk__str_eq(mon->task, RSC_CANCEL, pcmk__str_none)) {
+ || pcmk__str_eq(mon->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
continue; // Not a recurring monitor
}
- order_actions(n_data->post_done, mon, pe_order_optional);
+ order_actions(n_data->post_done, mon, pcmk__ar_ordered);
}
}
@@ -428,12 +432,12 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
* \return Newly created notification data
*/
notify_data_t *
-pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action, pe_action_t *complete)
+pe__action_notif_pseudo_ops(pcmk_resource_t *rsc, const char *task,
+ pcmk_action_t *action, pcmk_action_t *complete)
{
notify_data_t *n_data = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
return NULL;
}
@@ -445,60 +449,63 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
if (action != NULL) { // Need "pre-" pseudo-actions
// Create "pre-" notify pseudo-action for clone
- n_data->pre = new_notify_pseudo_action(rsc, action, RSC_NOTIFY, "pre");
- pe__set_action_flags(n_data->pre, pe_action_runnable);
+ n_data->pre = new_notify_pseudo_action(rsc, action, PCMK_ACTION_NOTIFY,
+ "pre");
+ pe__set_action_flags(n_data->pre, pcmk_action_runnable);
add_hash_param(n_data->pre->meta, "notify_type", "pre");
add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
// Create "pre-" notifications complete pseudo-action for clone
- n_data->pre_done = new_notify_pseudo_action(rsc, action, RSC_NOTIFIED,
+ n_data->pre_done = new_notify_pseudo_action(rsc, action,
+ PCMK_ACTION_NOTIFIED,
"confirmed-pre");
- pe__set_action_flags(n_data->pre_done, pe_action_runnable);
+ pe__set_action_flags(n_data->pre_done, pcmk_action_runnable);
add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
add_hash_param(n_data->pre_done->meta,
"notify_operation", n_data->action);
// Order "pre-" -> "pre-" complete -> original action
- order_actions(n_data->pre, n_data->pre_done, pe_order_optional);
- order_actions(n_data->pre_done, action, pe_order_optional);
+ order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
+ order_actions(n_data->pre_done, action, pcmk__ar_ordered);
}
if (complete != NULL) { // Need "post-" pseudo-actions
// Create "post-" notify pseudo-action for clone
- n_data->post = new_notify_pseudo_action(rsc, complete, RSC_NOTIFY,
- "post");
+ n_data->post = new_notify_pseudo_action(rsc, complete,
+ PCMK_ACTION_NOTIFY, "post");
n_data->post->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post, pe_action_runnable);
+ pe__clear_action_flags(n_data->post, pcmk_action_runnable);
}
add_hash_param(n_data->post->meta, "notify_type", "post");
add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
// Create "post-" notifications complete pseudo-action for clone
n_data->post_done = new_notify_pseudo_action(rsc, complete,
- RSC_NOTIFIED,
+ PCMK_ACTION_NOTIFIED,
"confirmed-post");
n_data->post_done->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post_done, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post_done, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post_done, pe_action_runnable);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_runnable);
}
add_hash_param(n_data->post_done->meta, "notify_type", "post");
add_hash_param(n_data->post_done->meta,
"notify_operation", n_data->action);
// Order original action complete -> "post-" -> "post-" complete
- order_actions(complete, n_data->post, pe_order_implies_then);
- order_actions(n_data->post, n_data->post_done, pe_order_implies_then);
+ order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
+ order_actions(n_data->post, n_data->post_done,
+ pcmk__ar_first_implies_then);
}
// If we created both, order "pre-" complete -> "post-"
if ((action != NULL) && (complete != NULL)) {
- order_actions(n_data->pre_done, n_data->post, pe_order_optional);
+ order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
}
return n_data;
}
@@ -514,7 +521,7 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
* \note The caller is responsible for freeing the return value.
*/
static notify_entry_t *
-new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
+new_notify_entry(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
notify_entry_t *entry = calloc(1, sizeof(notify_entry_t));
@@ -533,12 +540,12 @@ new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in,out] n_data Notification data for clone
*/
static void
-collect_resource_data(const pe_resource_t *rsc, bool activity,
+collect_resource_data(const pcmk_resource_t *rsc, bool activity,
notify_data_t *n_data)
{
const GList *iter = NULL;
notify_entry_t *entry = NULL;
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
if (n_data == NULL) {
return;
@@ -551,7 +558,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// If this is a clone, call recursively for each instance
if (rsc->children != NULL) {
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
collect_resource_data(child, activity, n_data);
}
@@ -567,21 +574,21 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification indicating the resource state
switch (rsc->role) {
- case RSC_ROLE_STOPPED:
+ case pcmk_role_stopped:
n_data->inactive = g_list_prepend(n_data->inactive, entry);
break;
- case RSC_ROLE_STARTED:
+ case pcmk_role_started:
n_data->active = g_list_prepend(n_data->active, entry);
break;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_unpromoted:
n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
n_data->promoted = g_list_prepend(n_data->promoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
@@ -601,30 +608,31 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification entries for each of the resource's actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- const pe_action_t *op = (const pe_action_t *) iter->data;
+ const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
enum action_tasks task = text2task(op->task);
- if ((task == stop_rsc) && op->node->details->unclean) {
+ if ((task == pcmk_action_stop) && op->node->details->unclean) {
// Create anyway (additional noise if node can't be fenced)
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
continue;
}
entry = new_notify_entry(rsc, op->node);
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
n_data->start = g_list_prepend(n_data->start, entry);
break;
- case stop_rsc:
+ case pcmk_action_stop:
n_data->stop = g_list_prepend(n_data->stop, entry);
break;
- case action_promote:
+ case pcmk_action_promote:
n_data->promote = g_list_prepend(n_data->promote, entry);
break;
- case action_demote:
+ case pcmk_action_demote:
n_data->demote = g_list_prepend(n_data->demote, entry);
break;
default:
@@ -661,7 +669,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
* \param[in,out] n_data Notification data
*/
static void
-add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
+add_notif_keys(const pcmk_resource_t *rsc, notify_data_t *n_data)
{
bool required = false; // Whether to make notify actions required
GString *rsc_list = NULL;
@@ -673,14 +681,14 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
n_data->stop = notify_entries_to_strings(n_data->stop,
&rsc_list, &node_list);
if ((strcmp(" ", (const char *) rsc_list->str) != 0)
- && pcmk__str_eq(n_data->action, RSC_STOP, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_STOP, pcmk__str_none)) {
required = true;
}
add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
if ((n_data->start != NULL)
- && pcmk__str_eq(n_data->action, RSC_START, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_START, pcmk__str_none)) {
required = true;
}
n_data->start = notify_entries_to_strings(n_data->start,
@@ -689,7 +697,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
if ((n_data->demote != NULL)
- && pcmk__str_eq(n_data->action, RSC_DEMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
required = true;
}
n_data->demote = notify_entries_to_strings(n_data->demote,
@@ -698,7 +706,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
if ((n_data->promote != NULL)
- && pcmk__str_eq(n_data->action, RSC_PROMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
required = true;
}
n_data->promote = notify_entries_to_strings(n_data->promote,
@@ -755,13 +763,13 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
if (required && (n_data->pre != NULL)) {
- pe__clear_action_flags(n_data->pre, pe_action_optional);
- pe__clear_action_flags(n_data->pre_done, pe_action_optional);
+ pe__clear_action_flags(n_data->pre, pcmk_action_optional);
+ pe__clear_action_flags(n_data->pre_done, pcmk_action_optional);
}
if (required && (n_data->post != NULL)) {
- pe__clear_action_flags(n_data->post, pe_action_optional);
- pe__clear_action_flags(n_data->post_done, pe_action_optional);
+ pe__clear_action_flags(n_data->post, pcmk_action_optional);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_optional);
}
}
@@ -773,14 +781,15 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
*
* \return If action is behind a remote connection, connection's start
*/
-static pe_action_t *
-find_remote_start(pe_action_t *action)
+static pcmk_action_t *
+find_remote_start(pcmk_action_t *action)
{
if ((action != NULL) && (action->node != NULL)) {
- pe_resource_t *remote_rsc = action->node->details->remote_rsc;
+ pcmk_resource_t *remote_rsc = action->node->details->remote_rsc;
if (remote_rsc != NULL) {
- return find_first_action(remote_rsc->actions, NULL, RSC_START,
+ return find_first_action(remote_rsc->actions, NULL,
+ PCMK_ACTION_START,
NULL);
}
}
@@ -795,11 +804,11 @@ find_remote_start(pe_action_t *action)
* \param[in,out] n_data Clone notification data for some action
*/
static void
-create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
+create_notify_actions(pcmk_resource_t *rsc, notify_data_t *n_data)
{
GList *iter = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *start = NULL;
enum action_tasks task = text2task(n_data->action);
// If this is a clone, call recursively for each instance
@@ -810,14 +819,15 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Add notification meta-attributes to original actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *op = (pe_action_t *) iter->data;
+ pcmk_action_t *op = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
switch (text2task(op->task)) {
- case start_rsc:
- case stop_rsc:
- case action_promote:
- case action_demote:
+ case pcmk_action_start:
+ case pcmk_action_stop:
+ case pcmk_action_promote:
+ case pcmk_action_demote:
add_notify_data_to_action_meta(n_data, op);
break;
default:
@@ -828,7 +838,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Skip notify action itself if original action was not needed
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
if (n_data->start == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -836,7 +846,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_promote:
+ case pcmk_action_promote:
if (n_data->promote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -844,7 +854,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_demote:
+ case pcmk_action_demote:
if (n_data->demote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -861,18 +871,19 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
rsc->id, n_data->action);
// Create notify actions for stop or demote
- if ((rsc->role != RSC_ROLE_STOPPED)
- && ((task == stop_rsc) || (task == action_demote))) {
+ if ((rsc->role != pcmk_role_stopped)
+ && ((task == pcmk_action_stop) || (task == pcmk_action_demote))) {
- stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL);
+ stop = find_first_action(rsc->actions, NULL, PCMK_ACTION_STOP, NULL);
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current_node = (pe_node_t *) iter->data;
+ pcmk_node_t *current_node = (pcmk_node_t *) iter->data;
/* If a stop is a pseudo-action implied by fencing, don't try to
* notify the node getting fenced.
*/
- if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)
+ if ((stop != NULL)
+ && pcmk_is_set(stop->flags, pcmk_action_pseudo)
&& (current_node->details->unclean
|| current_node->details->remote_requires_reset)) {
continue;
@@ -881,23 +892,23 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
new_notify_action(rsc, current_node, n_data->pre,
n_data->pre_done, n_data);
- if ((task == action_demote) || (stop == NULL)
- || pcmk_is_set(stop->flags, pe_action_optional)) {
+ if ((task == pcmk_action_demote) || (stop == NULL)
+ || pcmk_is_set(stop->flags, pcmk_action_optional)) {
new_post_notify_action(rsc, current_node, n_data);
}
}
}
// Create notify actions for start or promote
- if ((rsc->next_role != RSC_ROLE_STOPPED)
- && ((task == start_rsc) || (task == action_promote))) {
+ if ((rsc->next_role != pcmk_role_stopped)
+ && ((task == pcmk_action_start) || (task == pcmk_action_promote))) {
- start = find_first_action(rsc->actions, NULL, RSC_START, NULL);
+ start = find_first_action(rsc->actions, NULL, PCMK_ACTION_START, NULL);
if (start != NULL) {
- pe_action_t *remote_start = find_remote_start(start);
+ pcmk_action_t *remote_start = find_remote_start(start);
if ((remote_start != NULL)
- && !pcmk_is_set(remote_start->flags, pe_action_runnable)) {
+ && !pcmk_is_set(remote_start->flags, pcmk_action_runnable)) {
/* Start and promote actions for a clone instance behind
* a Pacemaker Remote connection happen after the
* connection starts. If the connection start is blocked, do
@@ -911,8 +922,8 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
role2text(rsc->next_role), rsc->id);
return;
}
- if ((task != start_rsc) || (start == NULL)
- || pcmk_is_set(start->flags, pe_action_optional)) {
+ if ((task != pcmk_action_start) || (start == NULL)
+ || pcmk_is_set(start->flags, pcmk_action_optional)) {
new_notify_action(rsc, rsc->allocated_to, n_data->pre,
n_data->pre_done, n_data);
@@ -929,7 +940,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
* \param[in,out] n_data Clone notification data for some action
*/
void
-pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data)
+pe__create_action_notifications(pcmk_resource_t *rsc, notify_data_t *n_data)
{
if ((rsc == NULL) || (n_data == NULL)) {
return;
@@ -978,13 +989,14 @@ pe__free_action_notification_data(notify_data_t *n_data)
* \param[in,out] stonith_op Fencing action that implies \p stop
*/
void
-pe__order_notifs_after_fencing(const pe_action_t *stop, pe_resource_t *rsc,
- pe_action_t *stonith_op)
+pe__order_notifs_after_fencing(const pcmk_action_t *stop, pcmk_resource_t *rsc,
+ pcmk_action_t *stonith_op)
{
notify_data_t *n_data;
crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
- n_data = pe__action_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op);
+ n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_STOP, NULL,
+ stonith_op);
if (n_data != NULL) {
collect_resource_data(rsc, false, n_data);
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 68cc867..65f3c18 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -8,28 +8,31 @@
*/
#include <crm_internal.h>
+
#include <stdint.h>
+
#include <crm/common/xml_internal.h>
#include <crm/common/output.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/cib/util.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
const char *
-pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
+pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts)
{
const char * desc = NULL;
// User-supplied description
- if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
- || pcmk__list_of_multiple(rsc->running_on)) {
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) {
desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
}
return desc;
}
/* Never display node attributes whose name starts with one of these prefixes */
-#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
- "shutdown", "terminate", "standby", "#", NULL }
+#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
+ "shutdown", PCMK_NODE_ATTR_TERMINATE, "standby", "#", \
+ NULL }
static int
compare_attribute(gconstpointer a, gconstpointer b)
@@ -47,7 +50,7 @@ compare_attribute(gconstpointer a, gconstpointer b)
*
* \param[in] node Node that ran this resource
* \param[in,out] rsc_list List of resources for this node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in] attrname Attribute to find
* \param[out] expected_score Expected value for this attribute
*
@@ -57,19 +60,20 @@ compare_attribute(gconstpointer a, gconstpointer b)
* or degraded.
*/
static bool
-add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
- const char *attrname, int *expected_score)
+add_extra_info(const pcmk_node_t *node, GList *rsc_list,
+ pcmk_scheduler_t *scheduler, const char *attrname,
+ int *expected_score)
{
GList *gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
GHashTable *params = NULL;
if (rsc->children != NULL) {
- if (add_extra_info(node, rsc->children, data_set, attrname,
+ if (add_extra_info(node, rsc->children, scheduler, attrname,
expected_score)) {
return true;
}
@@ -79,7 +83,7 @@ add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_se
continue;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
name = g_hash_table_lookup(params, "name");
if (name == NULL) {
@@ -150,13 +154,15 @@ get_operation_list(xmlNode *rsc_entry) {
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Ignore notifies and some probes */
- if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)
+ || (pcmk__str_eq(task, "probe", pcmk__str_none)
+ && (op_rc_i == CRM_EX_NOT_RUNNING))) {
continue;
}
@@ -188,10 +194,10 @@ append_dump_text(gpointer key, gpointer value, gpointer user_data)
}
static const char *
-get_cluster_stack(pe_working_set_t *data_set)
+get_cluster_stack(pcmk_scheduler_t *scheduler)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
@@ -290,7 +296,7 @@ op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
}
static char *
-resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
+resource_history_string(pcmk_resource_t *rsc, const char *rsc_id, bool all,
int failcount, time_t last_failure) {
char *buf = NULL;
@@ -325,27 +331,39 @@ resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
return buf;
}
+/*!
+ * \internal
+ * \brief Get a node's feature set for status display purposes
+ *
+ * \param[in] node Node to check
+ *
+ * \return String representation of feature set if the node is fully up (using
+ * "<3.15.1" for older nodes that don't set the #feature-set attribute),
+ * otherwise NULL
+ */
static const char *
-get_node_feature_set(pe_node_t *node) {
- const char *feature_set = NULL;
+get_node_feature_set(const pcmk_node_t *node)
+{
+ if (node->details->online && node->details->expected_up
+ && !pe__is_guest_or_remote_node(node)) {
- if (node->details->online && !pe__is_guest_or_remote_node(node)) {
- feature_set = g_hash_table_lookup(node->details->attrs,
- CRM_ATTR_FEATURE_SET);
- /* The feature set attribute is present since 3.15.1. If it is missing
- * then the node must be running an earlier version. */
- if (feature_set == NULL) {
- feature_set = "<3.15.1";
- }
+ const char *feature_set = g_hash_table_lookup(node->details->attrs,
+ CRM_ATTR_FEATURE_SET);
+
+ /* The feature set attribute is present since 3.15.1. If it is missing,
+ * then the node must be running an earlier version.
+ */
+ return pcmk__s(feature_set, "<3.15.1");
}
- return feature_set;
+ return NULL;
}
static bool
-is_mixed_version(pe_working_set_t *data_set) {
+is_mixed_version(pcmk_scheduler_t *scheduler)
+{
const char *feature_set = NULL;
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
const char *node_feature_set = get_node_feature_set(node);
if (node_feature_set != NULL) {
if (feature_set == NULL) {
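get_node_feature_set() above only reports a feature set for cluster nodes that are online and expected up, substituting "<3.15.1" when the attribute is absent, and is_mixed_version() compares those values across nodes. A sketch of that comparison pattern, with a plain array standing in for the node list and NULL marking a node that reports no feature set:

    /* Comparing per-node feature sets to spot a mixed-version cluster. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool is_mixed(const char *feature_sets[], int n_nodes)
    {
        const char *first = NULL;

        for (int i = 0; i < n_nodes; i++) {
            const char *fs = feature_sets[i];

            if (fs == NULL) {
                continue;               /* no opinion from this node */
            }
            if (first == NULL) {
                first = fs;             /* remember the first reported set */
            } else if (strcmp(first, fs) != 0) {
                return true;            /* two nodes disagree */
            }
        }
        return false;
    }

    int main(void)
    {
        const char *sets[] = { "3.17.4", NULL, "3.16.2" };

        printf("mixed: %s\n", is_mixed(sets, 3)? "yes" : "no");  /* yes */
        return 0;
    }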
@@ -359,7 +377,7 @@ is_mixed_version(pe_working_set_t *data_set) {
}
static char *
-formatted_xml_buf(pe_resource_t *rsc, bool raw)
+formatted_xml_buf(const pcmk_resource_t *rsc, bool raw)
{
if (raw) {
return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
@@ -368,18 +386,18 @@ formatted_xml_buf(pe_resource_t *rsc, bool raw)
}
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -388,47 +406,52 @@ cluster_summary(pcmk__output_t *out, va_list args) {
if (pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -436,18 +459,18 @@ cluster_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -455,38 +478,44 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
/* Always print DC if none, even if not requested */
- if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
+ if ((scheduler->dc_node == NULL)
+ || pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
@@ -497,13 +526,13 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -512,7 +541,7 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
char *
-pe__node_display_name(pe_node_t *node, bool print_detail)
+pe__node_display_name(pcmk_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
@@ -523,8 +552,8 @@ pe__node_display_name(pe_node_t *node, bool print_detail)
/* Host is displayed only if this is a guest node and detail is requested */
if (print_detail && pe__is_guest_node(node)) {
- const pe_resource_t *container = node->details->remote_rsc->container;
- const pe_node_t *host_node = pe__current_node(container);
+ const pcmk_resource_t *container = node->details->remote_rsc->container;
+ const pcmk_node_t *host_node = pe__current_node(container);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
@@ -575,9 +604,7 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
- xml_node = is_list
- ? create_xml_node(xml_node, tag_name)
- : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
+ xml_node = create_xml_node(xml_node, tag_name);
va_start(args, pairs_count);
while(pairs_count--) {
@@ -598,20 +625,20 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
static const char *
role_desc(enum rsc_role_e role)
{
- if (role == RSC_ROLE_PROMOTED) {
+ if (role == pcmk_role_promoted) {
#ifdef PCMK__COMPAT_2_0
- return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
+ return "as " PCMK__ROLE_PROMOTED_LEGACY " ";
#else
- return "in " RSC_ROLE_PROMOTED_S " role ";
+ return "in " PCMK__ROLE_PROMOTED " role ";
#endif
}
return "";
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_html(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -628,10 +655,10 @@ ban_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_text(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -645,14 +672,14 @@ ban_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
- const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
+ const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted);
char *weight_s = pcmk__itoa(pe_node->weight);
pcmk__output_create_xml_node(out, "ban",
@@ -674,11 +701,11 @@ ban_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
+PCMK__OUTPUT_ARGS("ban-list", "pcmk_scheduler_t *", "const char *", "GList *",
"uint32_t", "bool")
static int
ban_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *prefix = va_arg(args, const char *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -688,9 +715,10 @@ ban_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Print each ban */
- for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
+ for (gIter = scheduler->placement_constraints;
+ gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
- const pe_resource_t *rsc = location->rsc_lh;
+ const pcmk_resource_t *rsc = location->rsc_lh;
if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
continue;
@@ -704,7 +732,7 @@ ban_list(pcmk__output_t *out, va_list args) {
}
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
@@ -843,11 +871,11 @@ cluster_counts_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_html(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -881,11 +909,11 @@ cluster_dc_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_text(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -904,11 +932,11 @@ cluster_dc_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
@@ -937,11 +965,11 @@ static int
cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
unsigned long long flags = va_arg(args, unsigned long long);
- if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
return pcmk_rc_ok;
- } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n");
return pcmk_rc_ok;
@@ -950,48 +978,54 @@ cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
@@ -1005,50 +1039,56 @@ cluster_options_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_log(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
} else {
return pcmk_rc_no_output;
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_text(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
@@ -1056,43 +1096,48 @@ cluster_options_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+#define bv(flag) pcmk__btoa(pcmk_is_set(scheduler->flags, (flag)))
+
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *no_quorum_policy = NULL;
- char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
- char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
+ char *stonith_timeout_str = pcmk__itoa(scheduler->stonith_timeout);
+ char *priority_fencing_delay_str = pcmk__itoa(scheduler->priority_fencing_delay * 1000);
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
no_quorum_policy = "freeze";
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
no_quorum_policy = "stop";
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
no_quorum_policy = "demote";
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
no_quorum_policy = "ignore";
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
no_quorum_policy = "suicide";
break;
}
pcmk__output_create_xml_node(out, "cluster_options",
- "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
- "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
+ "stonith-enabled",
+ bv(pcmk_sched_fencing_enabled),
+ "symmetric-cluster",
+ bv(pcmk_sched_symmetric_cluster),
"no-quorum-policy", no_quorum_policy,
- "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
- "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
+ "maintenance-mode",
+ bv(pcmk_sched_in_maintenance),
+ "stop-all-resources", bv(pcmk_sched_stop_all),
"stonith-timeout-ms", stonith_timeout_str,
"priority-fencing-delay-ms", priority_fencing_delay_str,
NULL);
@@ -1288,8 +1333,8 @@ failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
NULL);
}
- pcmk__g_strcat(str, crm_action_str(task, interval_ms), " on ", node_name,
- NULL);
+ pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ",
+ node_name, NULL);
if (status == PCMK_EXEC_DONE) {
pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
@@ -1496,11 +1541,11 @@ failed_action_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
+PCMK__OUTPUT_ARGS("failed-action-list", "pcmk_scheduler_t *", "GList *",
"GList *", "uint32_t", "bool")
static int
failed_action_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -1509,11 +1554,11 @@ failed_action_list(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
- if (xmlChildElementCount(data_set->failed) == 0) {
+ if (xmlChildElementCount(scheduler->failed) == 0) {
return rc;
}
- for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
+ for (xml_op = pcmk__xml_first_child(scheduler->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
char *rsc = NULL;
@@ -1546,7 +1591,7 @@ failed_action_list(pcmk__output_t *out, va_list args) {
}
static void
-status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
+status_node(pcmk_node_t *node, xmlNodePtr parent, uint32_t show_opts)
{
int health = pe__node_health(node);
@@ -1598,11 +1643,11 @@ status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
}
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool",
"GList *", "GList *")
static int
node_html(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1641,7 +1686,7 @@ node_html(pcmk__output_t *out, va_list args) {
status_node(node, item_node, show_opts);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
show_opts |= pcmk_show_rsc_only;
@@ -1679,7 +1724,7 @@ node_html(pcmk__output_t *out, va_list args) {
* \return String representation of node's status
*/
static const char *
-node_text_status(const pe_node_t *node)
+node_text_status(const pcmk_node_t *node)
{
if (node->details->unclean) {
if (node->details->online) {
@@ -1723,10 +1768,11 @@ node_text_status(const pe_node_t *node)
return "OFFLINE";
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_text(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1784,7 +1830,7 @@ node_text(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter2->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1809,10 +1855,11 @@ node_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1826,10 +1873,10 @@ node_xml(pcmk__output_t *out, va_list args) {
const char *feature_set;
switch (node->details->type) {
- case node_member:
+ case pcmk_node_variant_cluster:
node_type = "member";
break;
- case node_remote:
+ case pcmk_node_variant_remote:
node_type = "remote";
break;
case node_ping:
@@ -1873,7 +1920,7 @@ node_xml(pcmk__output_t *out, va_list args) {
GList *lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1959,13 +2006,13 @@ node_attribute_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
gchar *node_str = NULL;
char *last_change_str = NULL;
@@ -1976,10 +2023,10 @@ node_and_op(pcmk__output_t *out, va_list args) {
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, PCMK_EXEC_UNKNOWN);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
@@ -2014,13 +2061,13 @@ node_and_op(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *op_rsc = crm_element_value(xml_op, "resource");
int status;
time_t last_change = 0;
@@ -2036,7 +2083,7 @@ node_and_op_xml(pcmk__output_t *out, va_list args) {
"status", pcmk_exec_status_str(status),
NULL);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -2086,11 +2133,11 @@ node_attribute_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
+PCMK__OUTPUT_ARGS("node-attribute-list", "pcmk_scheduler_t *", "uint32_t",
"bool", "GList *", "GList *")
static int
node_attribute_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2099,8 +2146,8 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Display each node's attributes */
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
GList *attr_list = NULL;
GHashTableIter iter;
@@ -2137,7 +2184,7 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
value = pe_node_attribute_raw(node, name);
add_extra = add_extra_info(node, node->details->running_rsc,
- data_set, name, &expected_score);
+ scheduler, name, &expected_score);
/* Print attribute name and value */
out->message(out, "node-attribute", name, value, add_extra,
@@ -2152,11 +2199,11 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s capacity:",
@@ -2169,11 +2216,11 @@ node_capacity(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity_xml(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
@@ -2185,12 +2232,12 @@ node_capacity_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
- "GList *", "GList *", "uint32_t", "uint32_t")
+PCMK__OUTPUT_ARGS("node-history-list", "pcmk_scheduler_t *", "pcmk_node_t *",
+ "xmlNodePtr", "GList *", "GList *", "uint32_t", "uint32_t")
static int
node_history_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
xmlNode *node_state = va_arg(args, xmlNode *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -2208,8 +2255,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
- pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
/* We can't use is_filtered here to filter group resources. For is_filtered,
* we have to decide whether to check the parent or not. If we check the
@@ -2219,7 +2266,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
*
* For other resource types, is_filtered is okay.
*/
- if (parent->variant == pe_group) {
+ if (parent->variant == pcmk_rsc_variant_group) {
if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
pcmk__str_star_matches)
&& !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
@@ -2234,8 +2281,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
if (failcount <= 0) {
continue;
@@ -2251,7 +2298,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
failcount, last_failure, false);
} else {
GList *op_list = get_operation_list(rsc_entry);
- pe_resource_t *rsc = pe_find_resource(data_set->resources,
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
crm_element_value(rsc_entry, XML_ATTR_ID));
if (op_list == NULL) {
@@ -2264,7 +2311,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
only_rsc);
}
- out->message(out, "resource-operation-list", data_set, rsc, node,
+ out->message(out, "resource-operation-list", scheduler, rsc, node,
op_list, show_opts);
}
}
@@ -2285,7 +2332,7 @@ node_list_html(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2320,7 +2367,7 @@ node_list_text(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
if (!pcmk__str_in_list(node->details->uname, only_node,
@@ -2416,7 +2463,7 @@ node_list_xml(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "nodes");
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2430,11 +2477,11 @@ node_list_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
+PCMK__OUTPUT_ARGS("node-summary", "pcmk_scheduler_t *", "GList *", "GList *",
"uint32_t", "uint32_t", "bool")
static int
node_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t section_opts = va_arg(args, uint32_t);
@@ -2442,7 +2489,7 @@ node_summary(pcmk__output_t *out, va_list args) {
bool print_spacer = va_arg(args, int);
xmlNode *node_state = NULL;
- xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
+ xmlNode *cib_status = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_STATUS);
int rc = pcmk_rc_no_output;
@@ -2452,7 +2499,7 @@ node_summary(pcmk__output_t *out, va_list args) {
for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
node_state != NULL; node_state = crm_next_same_xml(node_state)) {
- pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
@@ -2466,7 +2513,7 @@ node_summary(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
- out->message(out, "node-history-list", data_set, node, node_state,
+ out->message(out, "node-history-list", scheduler, node, node_state,
only_node, only_rsc, section_opts, show_opts);
}
@@ -2474,12 +2521,12 @@ node_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2494,12 +2541,12 @@ node_weight(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight_xml(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2587,12 +2634,13 @@ op_history_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
out->list_item(out, NULL, "%s promotion score on %s: %s",
@@ -2602,12 +2650,13 @@ promotion_score(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
@@ -2622,10 +2671,10 @@ promotion_score_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2636,10 +2685,10 @@ resource_config(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2651,10 +2700,11 @@ resource_config_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2673,10 +2723,11 @@ resource_history_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2733,12 +2784,12 @@ print_resource_header(pcmk__output_t *out, uint32_t show_opts)
}
-PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("resource-list", "pcmk_scheduler_t *", "uint32_t", "bool",
"GList *", "GList *", "bool")
static int
resource_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_summary = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2759,8 +2810,9 @@ resource_list(pcmk__output_t *out, va_list args)
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
- if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
- GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+ if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *rscs = pe__filter_rsc_list(scheduler->resources, only_rsc);
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
@@ -2771,8 +2823,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* For each resource, display it if appropriate */
- for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
+ for (rsc_iter = scheduler->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
@@ -2780,7 +2832,7 @@ resource_list(pcmk__output_t *out, va_list args)
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
@@ -2790,7 +2842,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* Skip primitives already counted in a brief summary */
- } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
+ } else if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && (rsc->variant == pcmk_rsc_variant_primitive)) {
continue;
/* Skip resources that aren't at least partially active,
@@ -2840,14 +2893,15 @@ resource_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
- "pe_node_t *", "GList *", "uint32_t")
+PCMK__OUTPUT_ARGS("resource-operation-list", "pcmk_scheduler_t *",
+ "pcmk_resource_t *", "pcmk_node_t *", "GList *", "uint32_t")
static int
resource_operation_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler G_GNUC_UNUSED = va_arg(args,
+ pcmk_scheduler_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
GList *op_list = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -2866,7 +2920,7 @@ resource_operation_list(pcmk__output_t *out, va_list args)
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
@@ -2874,8 +2928,8 @@ resource_operation_list(pcmk__output_t *out, va_list args)
/* If this is the first printed operation, print heading for resource */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
failcount, last_failure, true);
@@ -2894,12 +2948,13 @@ resource_operation_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
@@ -2912,12 +2967,13 @@ resource_util(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
@@ -2930,10 +2986,10 @@ resource_util_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_html(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2952,10 +3008,10 @@ ticket_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_text(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2974,10 +3030,10 @@ ticket_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_xml(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
xmlNodePtr node = NULL;
@@ -2997,16 +3053,16 @@ ticket_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
+PCMK__OUTPUT_ARGS("ticket-list", "pcmk_scheduler_t *", "bool")
static int
ticket_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
bool print_spacer = va_arg(args, int);
GHashTableIter iter;
gpointer key, value;
- if (g_hash_table_size(data_set->tickets) == 0) {
+ if (g_hash_table_size(scheduler->tickets) == 0) {
return pcmk_rc_no_output;
}
@@ -3016,9 +3072,9 @@ ticket_list(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
- g_hash_table_iter_init(&iter, data_set->tickets);
+ g_hash_table_iter_init(&iter, scheduler->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
- pe_ticket_t *ticket = (pe_ticket_t *) value;
+ pcmk_ticket_t *ticket = (pcmk_ticket_t *) value;
out->message(out, "ticket", ticket);
}
diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h
index ae8d131..bb0ee4e 100644
--- a/lib/pengine/pe_status_private.h
+++ b/lib/pengine/pe_status_private.h
@@ -19,6 +19,11 @@
#define G_GNUC_INTERNAL
#endif
+#include <glib.h> // GSList, GList, GHashTable
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/pengine/status.h> // pcmk_action_t, pcmk_resource_t, etc.
+
/*!
* \internal
* \deprecated This macro will be removed in a future release
@@ -43,10 +48,10 @@ typedef struct notify_data_s {
const char *action;
- pe_action_t *pre;
- pe_action_t *post;
- pe_action_t *pre_done;
- pe_action_t *post_done;
+ pcmk_action_t *pre;
+ pcmk_action_t *post;
+ pcmk_action_t *pre_done;
+ pcmk_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
@@ -60,62 +65,86 @@ typedef struct notify_data_s {
} notify_data_t;
G_GNUC_INTERNAL
-pe_resource_t *pe__create_clone_child(pe_resource_t *rsc,
- pe_working_set_t *data_set);
+pcmk_resource_t *pe__create_clone_child(pcmk_resource_t *rsc,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data);
+void pe__create_action_notifications(pcmk_resource_t *rsc,
+ notify_data_t *n_data);
G_GNUC_INTERNAL
void pe__free_action_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
-notify_data_t *pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action,
- pe_action_t *complete);
+notify_data_t *pe__action_notif_pseudo_ops(pcmk_resource_t *rsc,
+ const char *task,
+ pcmk_action_t *action,
+ pcmk_action_t *complete);
G_GNUC_INTERNAL
-void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set);
+void pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
-gboolean pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set);
+gboolean pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_remote_nodes(xmlNode *xml_resources, pe_working_set_t *data_set);
+gboolean unpack_remote_nodes(xmlNode *xml_resources,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_resources(const xmlNode *xml_resources,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set);
+gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_nodes(xmlNode *xml_nodes, pe_working_set_t *data_set);
+gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_tags(xmlNode *xml_tags, pe_working_set_t *data_set);
+gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_status(xmlNode *status, pe_working_set_t *data_set);
+gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-op_digest_cache_t *pe__compare_fencing_digest(pe_resource_t *rsc,
+op_digest_cache_t *pe__compare_fencing_digest(pcmk_resource_t *rsc,
const char *agent,
- pe_node_t *node,
- pe_working_set_t *data_set);
+ pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler);
+
+G_GNUC_INTERNAL
+void pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler);
+
+// Primitive resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__primitive_max_per_node(const pcmk_resource_t *rsc);
+
+// Group resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__group_max_per_node(const pcmk_resource_t *rsc);
+
+// Clone resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__clone_max_per_node(const pcmk_resource_t *rsc);
+
+// Bundle resource methods
G_GNUC_INTERNAL
-void pe__unpack_node_health_scores(pe_working_set_t *data_set);
+pcmk_node_t *pe__bundle_active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
G_GNUC_INTERNAL
-pe_node_t *pe__bundle_active_node(const pe_resource_t *rsc,
- unsigned int *count_all,
- unsigned int *count_clean);
+unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc);
#endif // PE_STATUS_PRIVATE__H
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 769635f..6b5058c 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013-2022 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,41 +10,41 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include <glib.h>
bool
-pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__resource_is_remote_conn(const pcmk_resource_t *rsc)
{
return (rsc != NULL) && rsc->is_remote_node
- && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
+ && pe__is_remote_node(pe_find_node(rsc->cluster->nodes, rsc->id));
}
bool
-pe__is_remote_node(const pe_node_t *node)
+pe__is_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& ((node->details->remote_rsc == NULL)
|| (node->details->remote_rsc->container == NULL));
}
bool
-pe__is_guest_node(const pe_node_t *node)
+pe__is_guest_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container != NULL);
}
bool
-pe__is_guest_or_remote_node(const pe_node_t *node)
+pe__is_guest_or_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote);
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote);
}
bool
-pe__is_bundle_node(const pe_node_t *node)
+pe__is_bundle_node(const pcmk_node_t *node)
{
return pe__is_guest_node(node)
&& pe_rsc_is_bundled(node->details->remote_rsc);
@@ -57,20 +57,20 @@ pe__is_bundle_node(const pe_node_t *node)
* If a given resource contains a filler resource that is a remote connection,
* return that filler resource (or NULL if none is found).
*
- * \param[in] data_set Working set of cluster
- * \param[in] rsc Resource to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] rsc Resource to check
*
* \return Filler resource with remote connection, or NULL if none found
*/
-pe_resource_t *
-pe__resource_contains_guest_node(const pe_working_set_t *data_set,
- const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_resource_t *rsc)
{
- if ((rsc != NULL) && (data_set != NULL)
- && pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ if ((rsc != NULL) && (scheduler != NULL)
+ && pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = gIter->data;
+ pcmk_resource_t *filler = gIter->data;
if (filler->is_remote_node) {
return filler;
@@ -111,26 +111,28 @@ xml_contains_remote_node(xmlNode *xml)
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
- * \param[in] data_set Working set for cluster
+ * \param[in] scheduler Scheduler data
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
-pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
- void (*helper)(const pe_node_t*, void*), void *user_data)
+pe_foreach_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_node_t *host,
+ void (*helper)(const pcmk_node_t*, void*),
+ void *user_data)
{
GList *iter;
- CRM_CHECK(data_set && host && host->details && helper, return);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ CRM_CHECK(scheduler && host && host->details && helper, return);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node && (rsc->container != NULL)) {
- pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
+ pcmk_node_t *guest_node = pe_find_node(scheduler->nodes, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
@@ -203,29 +205,30 @@ pe_create_remote_xml(xmlNode *parent, const char *uname,
// Add operations
xml_sub = create_xml_node(remote, "operations");
- crm_create_op_xml(xml_sub, uname, "monitor", "30s", "30s");
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_MONITOR, "30s", "30s");
if (start_timeout) {
- crm_create_op_xml(xml_sub, uname, "start", "0", start_timeout);
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_START, "0",
+ start_timeout);
}
return remote;
}
// History entry to be checked for fail count clearing
struct check_op {
- const xmlNode *rsc_op; // History entry XML
- pe_resource_t *rsc; // Known resource corresponding to history entry
- pe_node_t *node; // Known node corresponding to history entry
- enum pe_check_parameters check_type; // What needs checking
+ const xmlNode *rsc_op; // History entry XML
+ pcmk_resource_t *rsc; // Known resource corresponding to history entry
+ pcmk_node_t *node; // Known node corresponding to history entry
+ enum pcmk__check_parameters check_type; // What needs checking
};
void
-pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
- pe_node_t *node, enum pe_check_parameters flag,
- pe_working_set_t *data_set)
+pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters flag,
+ pcmk_scheduler_t *scheduler)
{
struct check_op *check_op = NULL;
- CRM_CHECK(data_set && rsc_op && rsc && node, return);
+ CRM_CHECK(scheduler && rsc_op && rsc && node, return);
check_op = calloc(1, sizeof(struct check_op));
CRM_ASSERT(check_op != NULL);
@@ -235,24 +238,25 @@ pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
check_op->rsc = rsc;
check_op->node = node;
check_op->check_type = flag;
- data_set->param_check = g_list_prepend(data_set->param_check, check_op);
+ scheduler->param_check = g_list_prepend(scheduler->param_check, check_op);
}
/*!
* \internal
* \brief Call a function for each action to be checked for addr substitution
*
- * \param[in,out] data_set Working set for cluster
- * \param[in] cb Function to be called
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] cb Function to be called
*/
void
-pe__foreach_param_check(pe_working_set_t *data_set,
- void (*cb)(pe_resource_t*, pe_node_t*, const xmlNode*,
- enum pe_check_parameters))
+pe__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
+ const xmlNode*, enum pcmk__check_parameters))
{
- CRM_CHECK(data_set && cb, return);
+ CRM_CHECK(scheduler && cb, return);
- for (GList *item = data_set->param_check; item != NULL; item = item->next) {
+ for (GList *item = scheduler->param_check;
+ item != NULL; item = item->next) {
struct check_op *check_op = item->data;
cb(check_op->rsc, check_op->node, check_op->rsc_op,
@@ -261,10 +265,10 @@ pe__foreach_param_check(pe_working_set_t *data_set,
}
void
-pe__free_param_checks(pe_working_set_t *data_set)
+pe__free_param_checks(pcmk_scheduler_t *scheduler)
{
- if (data_set && data_set->param_check) {
- g_list_free_full(data_set->param_check, free);
- data_set->param_check = NULL;
+ if (scheduler && scheduler->param_check) {
+ g_list_free_full(scheduler->param_check, free);
+ scheduler->param_check = NULL;
}
}
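Taken together, the three functions above form a small register/iterate/free cycle for deferred parameter checks. The sketch below strings them together using only the signatures visible in this hunk; the callback body and the choice of check flag are hypothetical placeholders.

    /* Hypothetical callback (illustration only) */
    static void
    check_params_cb(pcmk_resource_t *rsc, pcmk_node_t *node,
                    const xmlNode *rsc_op, enum pcmk__check_parameters check_type)
    {
        crm_trace("Would check parameters of %s on %s",
                  rsc->id, node->details->uname);
    }

    /* ... given a history entry rsc_op for rsc on node, and a check flag
     * (specific enum pcmk__check_parameters values are not shown in this hunk) ...
     */
    pe__add_param_check(rsc_op, rsc, node, flag, scheduler);  /* queue an entry */
    pe__foreach_param_check(scheduler, check_params_cb);      /* process the queue */
    pe__free_param_checks(scheduler);                         /* release the queue */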
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 7021d3c..50f9f64 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -41,7 +41,7 @@ pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -104,25 +104,23 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
enum expression_type
find_expression_type(xmlNode * expr)
{
- const char *tag = NULL;
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
- tag = crm_element_name(expr);
- if (pcmk__str_eq(tag, PCMK_XE_DATE_EXPRESSION, pcmk__str_none)) {
+ if (pcmk__xe_is(expr, PCMK_XE_DATE_EXPRESSION)) {
return time_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_RSC_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_RSC_EXPRESSION)) {
return rsc_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_OP_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_OP_EXPRESSION)) {
return op_expr;
- } else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, XML_TAG_RULE)) {
return nested_rule;
- } else if (!pcmk__str_eq(tag, XML_TAG_EXPRESSION, pcmk__str_none)) {
+ } else if (!pcmk__xe_is(expr, XML_TAG_EXPRESSION)) {
return not_expr;
} else if (pcmk__str_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
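The same substitution recurs throughout this patch (see rules_alerts.c below, where an explicit NULL-name guard is dropped alongside it), so pcmk__xe_is() can be read as a NULL-tolerant shorthand for the old two-step test. Roughly, and only as a reading aid:

    /* 2.1.6 idiom: fetch the element name, then compare it */
    if (pcmk__str_eq(crm_element_name(expr), PCMK_XE_DATE_EXPRESSION,
                     pcmk__str_none)) {
        /* ... */
    }

    /* 2.1.7 idiom: one call that also covers the missing-name case */
    if (pcmk__xe_is(expr, PCMK_XE_DATE_EXPRESSION)) {
        /* ... */
    }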
@@ -320,6 +318,7 @@ typedef struct sorted_set_s {
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
+ gboolean overwrite; // Whether existing values will be overwritten
} sorted_set_t;
static gint
@@ -343,10 +342,14 @@ sort_pairs(gconstpointer a, gconstpointer b)
return 1;
}
+ /* If we're overwriting values, we want lowest score first, so the highest
+ * score is processed last; if we're not overwriting values, we want highest
+ * score first, so nothing else overwrites it.
+ */
if (pair_a->score < pair_b->score) {
- return 1;
+ return pair_a->overwrite? -1 : 1;
} else if (pair_a->score > pair_b->score) {
- return -1;
+ return pair_a->overwrite? 1 : -1;
}
return 0;
}
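As a concrete reading of the comment above: take two otherwise ordinary blocks with scores 10 and 20. With overwrite enabled, sort_pairs() yields the order 10, 20, so the score-20 block is unpacked last and replaces any values the score-10 block set. With overwrite disabled, the order is 20, 10, and because values already in the table are not replaced in that mode, the score-20 block's values again survive. The higher-scored block wins either way; flipping the sort order just lets the same unpacking code honor the overwrite setting.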
@@ -360,8 +363,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
- name = crm_element_name(list->children);
- if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
+ if (pcmk__xe_is(list->children, XML_TAG_ATTRS)) {
list = list->children;
}
@@ -446,7 +448,7 @@ unpack_attr_set(gpointer data, gpointer user_data)
*/
static GList *
make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
- const char *always_first)
+ const char *always_first, gboolean overwrite)
{
GList *unsorted = NULL;
@@ -471,6 +473,7 @@ make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
pair->name = ID(expanded_attr_set);
pair->special_name = always_first;
pair->attr_set = expanded_attr_set;
+ pair->overwrite = overwrite;
score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
@@ -499,7 +502,7 @@ pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
- GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
+ GList *pairs = make_pairs(top, xml_obj, set_name, always_first, overwrite);
if (pairs) {
unpack_data_t data = {
@@ -536,7 +539,7 @@ pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -1161,7 +1164,7 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
const char *op = NULL;
const char *value = NULL;
- if (rule_data->role == RSC_ROLE_UNKNOWN) {
+ if (rule_data->role == pcmk_role_unknown) {
return accept;
}
@@ -1169,13 +1172,13 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
- if (rule_data->role > RSC_ROLE_STARTED) {
+ if (rule_data->role > pcmk_role_started) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = TRUE;
}
@@ -1186,8 +1189,8 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
@@ -1301,7 +1304,7 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c
index 073b0c1..9eed7ff 100644
--- a/lib/pengine/rules_alerts.c
+++ b/lib/pengine/rules_alerts.c
@@ -123,21 +123,16 @@ unpack_alert_filter(xmlNode *basenode, pcmk__alert_t *entry)
for (event_type = pcmk__xe_first_child(select); event_type != NULL;
event_type = pcmk__xe_next(event_type)) {
- const char *tagname = crm_element_name(event_type);
-
- if (tagname == NULL) {
- continue;
-
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_FENCING)) {
+ if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_FENCING)) {
flags |= pcmk__alert_fencing;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_NODES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_NODES)) {
flags |= pcmk__alert_node;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_RESOURCES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_RESOURCES)) {
flags |= pcmk__alert_resource;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
xmlNode *attr;
const char *attr_name;
int nattrs = 0;
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index b1144eb..e6ec237 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -21,38 +21,38 @@
#include <pe_status_private.h>
/*!
- * \brief Create a new working set
+ * \brief Create a new object to hold scheduler data
*
- * \return New, initialized working set on success, else NULL (and set errno)
- * \note Only pe_working_set_t objects created with this function (as opposed
+ * \return New, initialized scheduler data on success, else NULL (and set errno)
+ * \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
-pe_working_set_t *
+pcmk_scheduler_t *
pe_new_working_set(void)
{
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- if (data_set != NULL) {
- set_working_set_defaults(data_set);
+ if (scheduler != NULL) {
+ set_working_set_defaults(scheduler);
}
- return data_set;
+ return scheduler;
}
/*!
- * \brief Free a working set
+ * \brief Free scheduler data
*
- * \param[in,out] data_set Working set to free
+ * \param[in,out] scheduler Scheduler data to free
*/
void
-pe_free_working_set(pe_working_set_t *data_set)
+pe_free_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set != NULL) {
- pe_reset_working_set(data_set);
- data_set->priv = NULL;
- free(data_set);
+ if (scheduler != NULL) {
+ pe_reset_working_set(scheduler);
+ scheduler->priv = NULL;
+ free(scheduler);
}
}
@@ -68,105 +68,105 @@ pe_free_working_set(pe_working_set_t *data_set)
* - A list of the possible stop/start actions (without dependencies)
*/
gboolean
-cluster_status(pe_working_set_t * data_set)
+cluster_status(pcmk_scheduler_t * scheduler)
{
xmlNode *section = NULL;
- if ((data_set == NULL) || (data_set->input == NULL)) {
+ if ((scheduler == NULL) || (scheduler->input == NULL)) {
return FALSE;
}
crm_trace("Beginning unpack");
- if (data_set->failed != NULL) {
- free_xml(data_set->failed);
+ if (scheduler->failed != NULL) {
+ free_xml(scheduler->failed);
}
- data_set->failed = create_xml_node(NULL, "failed-ops");
+ scheduler->failed = create_xml_node(NULL, "failed-ops");
- if (data_set->now == NULL) {
- data_set->now = crm_time_new(NULL);
+ if (scheduler->now == NULL) {
+ scheduler->now = crm_time_new(NULL);
}
- if (data_set->dc_uuid == NULL) {
- data_set->dc_uuid = crm_element_value_copy(data_set->input,
- XML_ATTR_DC_UUID);
+ if (scheduler->dc_uuid == NULL) {
+ scheduler->dc_uuid = crm_element_value_copy(scheduler->input,
+ XML_ATTR_DC_UUID);
}
- if (pcmk__xe_attr_is_true(data_set->input, XML_ATTR_HAVE_QUORUM)) {
- pe__set_working_set_flags(data_set, pe_flag_have_quorum);
+ if (pcmk__xe_attr_is_true(scheduler->input, XML_ATTR_HAVE_QUORUM)) {
+ pe__set_working_set_flags(scheduler, pcmk_sched_quorate);
} else {
- pe__clear_working_set_flags(data_set, pe_flag_have_quorum);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_quorate);
}
- data_set->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
- data_set->input, LOG_NEVER);
- data_set->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
- data_set->input, LOG_NEVER);
+ scheduler->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
+ scheduler->input, LOG_NEVER);
+ scheduler->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
+ scheduler->input, LOG_NEVER);
- section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, scheduler->input,
LOG_TRACE);
- unpack_config(section, data_set);
+ unpack_config(section, scheduler);
- if (!pcmk_any_flags_set(data_set->flags,
- pe_flag_quick_location|pe_flag_have_quorum)
- && (data_set->no_quorum_policy != no_quorum_ignore)) {
+ if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_location_only|pcmk_sched_quorate)
+ && (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
crm_warn("Fencing and resource management disabled due to lack of quorum");
}
- section = get_xpath_object("//" XML_CIB_TAG_NODES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_NODES, scheduler->input,
LOG_TRACE);
- unpack_nodes(section, data_set);
+ unpack_nodes(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, scheduler->input,
LOG_TRACE);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- unpack_remote_nodes(section, data_set);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ unpack_remote_nodes(section, scheduler);
}
- unpack_resources(section, data_set);
+ unpack_resources(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_TAGS, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_TAGS, scheduler->input,
LOG_NEVER);
- unpack_tags(section, data_set);
+ unpack_tags(section, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- section = get_xpath_object("//"XML_CIB_TAG_STATUS, data_set->input,
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ section = get_xpath_object("//"XML_CIB_TAG_STATUS, scheduler->input,
LOG_TRACE);
- unpack_status(section, data_set);
+ unpack_status(section, scheduler);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_no_counts)) {
- for (GList *item = data_set->resources; item != NULL;
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_no_counts)) {
+ for (GList *item = scheduler->resources; item != NULL;
item = item->next) {
- ((pe_resource_t *) (item->data))->fns->count(item->data);
+ ((pcmk_resource_t *) (item->data))->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
- pe__set_working_set_flags(data_set, pe_flag_have_status);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_status);
return TRUE;
}
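The renamed entry points above compose into the usual life cycle, which the updated unit tests later in this patch also follow. A minimal sketch, with the CIB XML left abstract and no error handling beyond what the hunk itself shows:

    pcmk_scheduler_t *scheduler = pe_new_working_set();

    CRM_ASSERT(scheduler != NULL);
    scheduler->input = cib_xml;      /* <cib> element obtained elsewhere */
    cluster_status(scheduler);       /* unpack config, nodes, resources, status */

    /* ... inspect scheduler->nodes, scheduler->resources, and so on ... */

    pe_free_working_set(scheduler);  /* also frees scheduler->input; see
                                      * cleanup_calculations() below */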
/*!
* \internal
- * \brief Free a list of pe_resource_t
+ * \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
- * \note When a working set's resource list is freed, that includes the original
+ * \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
- * working set's node list, so take care not to use those afterward.
- * \todo Refactor pe_node_t to strdup() the node name.
+ * scheduler's node list, so take care not to use those afterward.
+ * \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
- rsc = (pe_resource_t *) iterator->data;
+ rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->fns->free(rsc);
}
@@ -193,7 +193,7 @@ static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
- pe_node_t *node = (pe_node_t *) iterator->data;
+ pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
@@ -268,140 +268,140 @@ pe__free_location(GList *constraints)
}
/*!
- * \brief Reset working set to default state without freeing it or constraints
+ * \brief Reset scheduler data to defaults without freeing it or constraints
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
-cleanup_calculations(pe_working_set_t * data_set)
+cleanup_calculations(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
- pe__clear_working_set_flags(data_set, pe_flag_have_status);
- if (data_set->config_hash != NULL) {
- g_hash_table_destroy(data_set->config_hash);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_have_status);
+ if (scheduler->config_hash != NULL) {
+ g_hash_table_destroy(scheduler->config_hash);
}
- if (data_set->singletons != NULL) {
- g_hash_table_destroy(data_set->singletons);
+ if (scheduler->singletons != NULL) {
+ g_hash_table_destroy(scheduler->singletons);
}
- if (data_set->tickets) {
- g_hash_table_destroy(data_set->tickets);
+ if (scheduler->tickets) {
+ g_hash_table_destroy(scheduler->tickets);
}
- if (data_set->template_rsc_sets) {
- g_hash_table_destroy(data_set->template_rsc_sets);
+ if (scheduler->template_rsc_sets) {
+ g_hash_table_destroy(scheduler->template_rsc_sets);
}
- if (data_set->tags) {
- g_hash_table_destroy(data_set->tags);
+ if (scheduler->tags) {
+ g_hash_table_destroy(scheduler->tags);
}
- free(data_set->dc_uuid);
+ free(scheduler->dc_uuid);
crm_trace("deleting resources");
- pe_free_resources(data_set->resources);
+ pe_free_resources(scheduler->resources);
crm_trace("deleting actions");
- pe_free_actions(data_set->actions);
+ pe_free_actions(scheduler->actions);
crm_trace("deleting nodes");
- pe_free_nodes(data_set->nodes);
+ pe_free_nodes(scheduler->nodes);
- pe__free_param_checks(data_set);
- g_list_free(data_set->stop_needed);
- free_xml(data_set->graph);
- crm_time_free(data_set->now);
- free_xml(data_set->input);
- free_xml(data_set->failed);
+ pe__free_param_checks(scheduler);
+ g_list_free(scheduler->stop_needed);
+ free_xml(scheduler->graph);
+ crm_time_free(scheduler->now);
+ free_xml(scheduler->input);
+ free_xml(scheduler->failed);
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- CRM_CHECK(data_set->ordering_constraints == NULL,;
+ CRM_CHECK(scheduler->ordering_constraints == NULL,;
);
- CRM_CHECK(data_set->placement_constraints == NULL,;
+ CRM_CHECK(scheduler->placement_constraints == NULL,;
);
}
/*!
- * \brief Reset a working set to default state without freeing it
+ * \brief Reset scheduler data to default state without freeing it
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*/
void
-pe_reset_working_set(pe_working_set_t *data_set)
+pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
- g_list_length(data_set->ordering_constraints));
- pe__free_ordering(data_set->ordering_constraints);
- data_set->ordering_constraints = NULL;
+ g_list_length(scheduler->ordering_constraints));
+ pe__free_ordering(scheduler->ordering_constraints);
+ scheduler->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
- g_list_length(data_set->placement_constraints));
- pe__free_location(data_set->placement_constraints);
- data_set->placement_constraints = NULL;
+ g_list_length(scheduler->placement_constraints));
+ pe__free_location(scheduler->placement_constraints);
+ scheduler->placement_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
- g_list_length(data_set->colocation_constraints));
- g_list_free_full(data_set->colocation_constraints, free);
- data_set->colocation_constraints = NULL;
+ g_list_length(scheduler->colocation_constraints));
+ g_list_free_full(scheduler->colocation_constraints, free);
+ scheduler->colocation_constraints = NULL;
crm_trace("Deleting %d ticket constraints",
- g_list_length(data_set->ticket_constraints));
- g_list_free_full(data_set->ticket_constraints, free);
- data_set->ticket_constraints = NULL;
+ g_list_length(scheduler->ticket_constraints));
+ g_list_free_full(scheduler->ticket_constraints, free);
+ scheduler->ticket_constraints = NULL;
- cleanup_calculations(data_set);
+ cleanup_calculations(scheduler);
}
void
-set_working_set_defaults(pe_working_set_t * data_set)
+set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
- void *priv = data_set->priv;
+ void *priv = scheduler->priv;
- memset(data_set, 0, sizeof(pe_working_set_t));
+ memset(scheduler, 0, sizeof(pcmk_scheduler_t));
- data_set->priv = priv;
- data_set->order_id = 1;
- data_set->action_id = 1;
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->priv = priv;
+ scheduler->order_id = 1;
+ scheduler->action_id = 1;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
- data_set->flags = 0x0ULL;
+ scheduler->flags = 0x0ULL;
- pe__set_working_set_flags(data_set,
- pe_flag_stop_rsc_orphans
- |pe_flag_symmetric_cluster
- |pe_flag_stop_action_orphans);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions);
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- pe__set_working_set_flags(data_set, pe_flag_concurrent_fencing);
+ pe__set_working_set_flags(scheduler, pcmk_sched_concurrent_fencing);
}
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
- return pe_find_resource_with_flags(rsc_list, id, pe_find_renamed);
+ return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
- pe_resource_t *parent = rIter->data;
+ pcmk_resource_t *parent = rIter->data;
- pe_resource_t *match =
+ pcmk_resource_t *match =
parent->fns->find_rsc(parent, id, NULL, flags);
if (match != NULL) {
return match;
@@ -414,7 +414,7 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
/*!
* \brief Find a node by name or ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] node_name If not NULL, name of node to find
*
@@ -422,10 +422,10 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
@@ -439,16 +439,16 @@ pe_find_node_any(const GList *nodes, const char *id, const char *uname)
/*!
* \brief Find a node by ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
@@ -464,16 +464,16 @@ pe_find_node_id(const GList *nodes, const char *id)
/*!
* \brief Find a node by name in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if (pcmk__str_eq(node->details->uname, node_name, pcmk__str_casei)) {
return node;
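For completeness, the lookup helpers re-typed in this file operate on the flat lists held in the scheduler data and combine along these lines (the node ID "1" is a made-up example; "cluster01" and "dummy" match the test fixtures used later in this patch):

    pcmk_node_t *by_name = pe_find_node(scheduler->nodes, "cluster01");
    pcmk_node_t *by_id   = pe_find_node_id(scheduler->nodes, "1");
    pcmk_node_t *either  = pe_find_node_any(scheduler->nodes, "1", "cluster01");

    /* pe_find_resource() wraps pe_find_resource_with_flags() with
     * pcmk_rsc_match_history, as shown above
     */
    pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, "dummy");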
diff --git a/lib/pengine/tags.c b/lib/pengine/tags.c
index 81c27e4..d8d8ac9 100644
--- a/lib/pengine/tags.c
+++ b/lib/pengine/tags.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2021 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -13,29 +13,30 @@
#include <stdbool.h>
#include <crm/common/util.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
GList *
-pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
const char *id = (const char *) refs->data;
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, id,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ id, flags);
if (!rsc) {
continue;
@@ -48,26 +49,26 @@ pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
GList *
-pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
/* Iterate over the list of node IDs. */
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
/* Find the node that has this ID. */
const char *id = (const char *) refs->data;
- pe_node_t *node = pe_find_node_id(data_set->nodes, id);
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, id);
if (!node) {
continue;
@@ -81,9 +82,10 @@ pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
bool
-pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *tag_name)
+pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc_name,
+ const char *tag_name)
{
- GList *rscs = pe__rscs_with_tag(data_set, tag_name);
+ GList *rscs = pe__rscs_with_tag(scheduler, tag_name);
bool retval = false;
if (rscs == NULL) {
@@ -96,9 +98,10 @@ pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *ta
}
bool
-pe__uname_has_tag(pe_working_set_t *data_set, const char *node_name, const char *tag_name)
+pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node_name,
+ const char *tag_name)
{
- GList *unames = pe__unames_with_tag(data_set, tag_name);
+ GList *unames = pe__unames_with_tag(scheduler, tag_name);
bool retval = false;
if (unames == NULL) {
diff --git a/lib/pengine/tests/Makefile.am b/lib/pengine/tests/Makefile.am
index 4986ef2..48ec5b4 100644
--- a/lib/pengine/tests/Makefile.am
+++ b/lib/pengine/tests/Makefile.am
@@ -1 +1,14 @@
-SUBDIRS = rules native status unpack utils
+#
+# Copyright 2020-2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+SUBDIRS = rules \
+ native \
+ status \
+ unpack \
+ utils
diff --git a/lib/pengine/tests/native/Makefile.am b/lib/pengine/tests/native/Makefile.am
index 5046ff1..07cc1a1 100644
--- a/lib/pengine/tests/native/Makefile.am
+++ b/lib/pengine/tests/native/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -17,6 +17,6 @@ AM_TESTS_ENVIRONMENT += PCMK_CTS_CLI_DIR=$(top_srcdir)/cts/cli
# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = native_find_rsc_test \
- pe_base_name_eq_test
+ pe_base_name_eq_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
index 22aaf41..b85ca24 100644
--- a/lib/pengine/tests/native/native_find_rsc_test.c
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,21 +10,18 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+#include <crm/common/scheduler.h>
#include <crm/common/xml.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
-
-/* Needed to access replicas inside a bundle. */
-#define PE__VARIANT_BUNDLE 1
-#include <lib/pengine/variant.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_node_t *cluster01, *cluster02, *httpd_bundle_0;
-pe_resource_t *exim_group, *inactive_group, *promotable_clone, *inactive_clone;
-pe_resource_t *httpd_bundle, *mysql_clone_group;
+pcmk_node_t *cluster01, *cluster02, *httpd_bundle_0;
+pcmk_resource_t *exim_group, *inactive_group;
+pcmk_resource_t *promotable_clone, *inactive_clone;
+pcmk_resource_t *httpd_bundle, *mysql_clone_group;
static int
setup(void **state) {
@@ -40,25 +37,26 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to the cluster nodes so we don't have to find them repeatedly. */
- cluster01 = pe_find_node(data_set->nodes, "cluster01");
- cluster02 = pe_find_node(data_set->nodes, "cluster02");
- httpd_bundle_0 = pe_find_node(data_set->nodes, "httpd-bundle-0");
+ cluster01 = pe_find_node(scheduler->nodes, "cluster01");
+ cluster02 = pe_find_node(scheduler->nodes, "cluster02");
+ httpd_bundle_0 = pe_find_node(scheduler->nodes, "httpd-bundle-0");
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "exim-group") == 0) {
exim_group = rsc;
@@ -80,14 +78,14 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
static void
bad_args(void **state) {
- pe_resource_t *rsc = (pe_resource_t *) g_list_first(data_set->resources)->data;
+ pcmk_resource_t *rsc = g_list_first(scheduler->resources)->data;
char *id = rsc->id;
char *name = NULL;
@@ -117,11 +115,11 @@ bad_args(void **state) {
static void
primitive_rsc(void **state) {
- pe_resource_t *dummy = NULL;
+ pcmk_resource_t *dummy = NULL;
/* Find the "dummy" resource, which is the only one with that ID in the set. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -133,20 +131,27 @@ primitive_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0));
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(dummy, "dummy", NULL, pe_find_clone));
- assert_null(native_find_rsc(dummy, "dummy", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because dummy is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(dummy, "dummy", cluster01, pe_find_current));
+ assert_null(native_find_rsc(dummy, "dummy", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(dummy, "dummy", cluster02, 0));
/* Passes because dummy is running on cluster02. */
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", cluster02, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -155,20 +160,27 @@ group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0));
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(exim_group, "exim-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of exim-group's children are running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_current));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0));
/* Passes because one of exim-group's children is running on cluster02. */
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", cluster02, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -177,30 +189,30 @@ inactive_group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_current));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_group,
+ native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-group's children are running. */
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
group_member_rsc(void **state) {
- pe_resource_t *public_ip = NULL;
+ pcmk_resource_t *public_ip = NULL;
/* Find the "Public-IP" resource, a member of "exim-group". */
for (GList *iter = exim_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "Public-IP") == 0) {
public_ip = rsc;
@@ -212,29 +224,36 @@ group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0));
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(public_ip, "Public-IP", NULL, pe_find_clone));
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because Public-IP is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, pe_find_current));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0));
/* Passes because Public-IP is running on cluster02. */
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
inactive_group_member_rsc(void **state) {
- pe_resource_t *inactive_dummy_1 = NULL;
+ pcmk_resource_t *inactive_dummy_1 = NULL;
/* Find the "inactive-dummy-1" resource, a member of "inactive-group". */
for (GList *iter = inactive_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "inactive-dummy-1") == 0) {
inactive_dummy_1 = rsc;
@@ -246,20 +265,21 @@ inactive_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0));
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_current));
+ assert_ptr_equal(inactive_dummy_1,
+ native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because inactive-dummy-1 is not running. */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -268,24 +288,40 @@ clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0));
/* Passes because one of ping-clone's children is running on cluster01. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0));
/* Passes because one of ping_clone's children is running on cluster02. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
@@ -294,28 +330,30 @@ inactive_clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_current));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_clone));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-clone's children are running. */
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_current|pe_find_clone));
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_current|pe_find_clone));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
}
static void
clone_instance_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -329,70 +367,132 @@ clone_instance_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, pe_find_current));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and primitive name was given, with correct flags. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any instance's base name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_any));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_anon));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_anon));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any instance's base name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with primitive name. */
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing promotable_clone
* instead of promotable_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
renamed_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -404,9 +504,13 @@ renamed_rsc(void **state) {
assert_non_null(promotable_0);
assert_non_null(promotable_1);
- /* Passes because pe_find_renamed means the base name matches clone_name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_renamed));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_renamed));
+ // Passes because pcmk_rsc_match_history means base name matches clone_name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
}
static void
@@ -415,36 +519,32 @@ bundle_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0));
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_clone));
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_clone_only));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0));
/* Passes because one of httpd_bundle's children is running on cluster01. */
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_current_node));
}
-static void
-bundle_replica_rsc(void **state) {
- pe__bundle_variant_data_t *bundle_data = NULL;
- pe__bundle_replica_t *replica_0 = NULL;
-
- pe_resource_t *ip_0 = NULL;
- pe_resource_t *child_0 = NULL;
- pe_resource_t *container_0 = NULL;
- pe_resource_t *remote_0 = NULL;
-
- get_bundle_variant_data(bundle_data, httpd_bundle);
- replica_0 = (pe__bundle_replica_t *) bundle_data->replicas->data;
-
- ip_0 = replica_0->ip;
- child_0 = replica_0->child;
- container_0 = replica_0->container;
- remote_0 = replica_0->remote;
+static bool
+bundle_first_replica(pe__bundle_replica_t *replica, void *user_data)
+{
+ pcmk_resource_t *ip_0 = replica->ip;
+ pcmk_resource_t *child_0 = replica->child;
+ pcmk_resource_t *container_0 = replica->container;
+ pcmk_resource_t *remote_0 = replica->remote;
assert_non_null(ip_0);
assert_non_null(child_0);
@@ -457,58 +557,109 @@ bundle_replica_rsc(void **state) {
assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0));
assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster02, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster01, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster02, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, pe_find_current));
-
- /* Passes because pe_find_any matches any replica's base name. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_anon));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(container_0, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(remote_0, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+
+ // Passes because pcmk_rsc_match_basename matches any replica's base name
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_any|pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(child_0, "httpd", NULL, pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing httpd-bundle
* instead of X_0 as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, pe_find_current));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(httpd_bundle,
+ "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ return false; // Do not iterate through any further replicas
+}
+
+static void
+bundle_replica_rsc(void **state)
+{
+ pe__foreach_bundle_replica(httpd_bundle, bundle_first_replica, NULL);
}
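
The rewrite above stops reaching into pe__bundle_variant_data_t directly and instead hands a callback to pe__foreach_bundle_replica(), which keeps iterating while the callback returns true. A rough sketch of that early-exit iterator pattern (generic helper names, not the Pacemaker implementation):

    #include <stdbool.h>
    #include <glib.h>

    /* Callback signature: return false to stop after the current item. */
    typedef bool (*demo_replica_fn_t)(void *replica, void *user_data);

    static void
    demo_foreach(GList *replicas, demo_replica_fn_t fn, void *user_data)
    {
        for (GList *iter = replicas; iter != NULL; iter = iter->next) {
            if (!fn(iter->data, user_data)) {
                break;  // caller only cared about the items seen so far
            }
        }
    }

Returning false from bundle_first_replica() is what restricts the assertions above to the first replica.
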
static void
@@ -517,34 +668,50 @@ clone_group_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0));
/* Passes because one of mysql-clone-group's children is running on cluster01. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0));
/* Passes because one of mysql-clone-group's children is running on cluster02. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_instance_rsc(void **rsc) {
- pe_resource_t *mysql_group_0 = NULL;
- pe_resource_t *mysql_group_1 = NULL;
+ pcmk_resource_t *mysql_group_0 = NULL;
+ pcmk_resource_t *mysql_group_1 = NULL;
/* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
mysql_group_0 = rsc;
@@ -558,73 +725,135 @@ clone_group_instance_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and base name was given, with correct flags. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any base name. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_any));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_anon));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_anon));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any base name
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing mysql_clone_group
* instead of mysql_group_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_member_rsc(void **state) {
- pe_resource_t *mysql_proxy = NULL;
+ pcmk_resource_t *mysql_proxy = NULL;
/* Find the "mysql-proxy" resource, a member of "mysql-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
for (GList *iter2 = rsc->children; iter2 != NULL; iter2 = iter2->next) {
- pe_resource_t *child = (pe_resource_t *) iter2->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter2->data;
if (strcmp(child->id, "mysql-proxy:0") == 0) {
mysql_proxy = child;
@@ -640,24 +869,35 @@ clone_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_current_node));
/* Passes because resource's parent is a clone. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_clone));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
/* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0));
/* Passes because mysql-proxy:0 is running on cluster02. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_current_node));
}
-/* TODO: Add tests for finding on allocated node (passing a node without
- * pe_find_current, after scheduling, for a resource that is starting/stopping/moving.
+/* TODO: Add tests for finding on assigned node (passing a node without
+ * pcmk_rsc_match_current_node, after scheduling, for a resource that is
+ * starting/stopping/moving.
*/
PCMK__UNIT_TEST(setup, teardown,
cmocka_unit_test(bad_args),
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
index 67a62f8..cb3c908 100644
--- a/lib/pengine/tests/native/pe_base_name_eq_test.c
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,15 +12,15 @@
#include <crm/common/unittest_internal.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
-pe_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
+pcmk_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
+pcmk_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
static int
setup(void **state) {
@@ -36,20 +36,21 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -59,7 +60,7 @@ setup(void **state) {
httpd_bundle = rsc;
} else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "mysql-group:0") == 0) {
mysql_group_0 = child;
@@ -69,7 +70,7 @@ setup(void **state) {
}
} else if (strcmp(rsc->id, "promotable-clone") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "promotable-rsc:0") == 0) {
promotable_0 = child;
@@ -85,7 +86,7 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
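
For readers skimming the diff, the fixture above follows one pattern: allocate a scheduler, set the no-counts/no-compat flags, attach the already-parsed CIB, and unpack it before any test runs. A condensed sketch of that pattern (error handling for reading the CIB file is elided; see the full file):

    static int
    setup(void **state)
    {
        scheduler = pe_new_working_set();
        if (scheduler == NULL) {
            return 1;   // nonzero tells cmocka the fixture failed
        }
        pe__set_working_set_flags(scheduler,
                                  pcmk_sched_no_counts|pcmk_sched_no_compat);
        scheduler->input = input;   // previously parsed CIB XML
        cluster_status(scheduler);  // unpack nodes and resources for the tests
        return 0;
    }

    static int
    teardown(void **state)
    {
        pe_free_working_set(scheduler);
        return 0;
    }
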
diff --git a/lib/pengine/tests/status/Makefile.am b/lib/pengine/tests/status/Makefile.am
index 3f95496..c7ddb70 100644
--- a/lib/pengine/tests/status/Makefile.am
+++ b/lib/pengine/tests/status/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,10 +13,10 @@ include $(top_srcdir)/mk/unittest.mk
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pe_find_node_any_test \
- pe_find_node_id_test \
- pe_find_node_test \
- pe_new_working_set_test \
- set_working_set_defaults_test
+check_PROGRAMS = pe_find_node_any_test \
+ pe_find_node_id_test \
+ pe_find_node_test \
+ pe_new_working_set_test \
+ set_working_set_defaults_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/pe_find_node_any_test.c b/lib/pengine/tests/status/pe_find_node_any_test.c
index b911424..5f5a27e 100644
--- a/lib/pengine/tests/status/pe_find_node_any_test.c
+++ b/lib/pengine/tests/status/pe_find_node_any_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -24,8 +24,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_find_node_id_test.c b/lib/pengine/tests/status/pe_find_node_id_test.c
index 832a40a..c6b8773 100644
--- a/lib/pengine/tests/status/pe_find_node_id_test.c
+++ b/lib/pengine/tests/status/pe_find_node_id_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->id = "id1";
diff --git a/lib/pengine/tests/status/pe_find_node_test.c b/lib/pengine/tests/status/pe_find_node_test.c
index 7c7ea30..305ddc9 100644
--- a/lib/pengine/tests/status/pe_find_node_test.c
+++ b/lib/pengine/tests/status/pe_find_node_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_new_working_set_test.c b/lib/pengine/tests/status/pe_new_working_set_test.c
index cf2df4f..b385f9c 100644
--- a/lib/pengine/tests/status/pe_new_working_set_test.c
+++ b/lib/pengine/tests/status/pe_new_working_set_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -19,7 +19,7 @@ calloc_fails(void **state) {
pcmk__mock_calloc = true; // calloc() will return NULL
expect_value(__wrap_calloc, nmemb, 1);
- expect_value(__wrap_calloc, size, sizeof(pe_working_set_t));
+ expect_value(__wrap_calloc, size, sizeof(pcmk_scheduler_t));
assert_null(pe_new_working_set());
pcmk__mock_calloc = false; // Use real calloc()
@@ -27,18 +27,18 @@ calloc_fails(void **state) {
static void
calloc_succeeds(void **state) {
- pe_working_set_t *data_set = pe_new_working_set();
+ pcmk_scheduler_t *scheduler = pe_new_working_set();
/* Nothing else to test about this function, as all it does is call
* set_working_set_defaults which is also a public function and should
* get its own unit test.
*/
- assert_non_null(data_set);
+ assert_non_null(scheduler);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
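
calloc_fails() above relies on a linker-level wrapper: while the mock toggle is set, __wrap_calloc() consumes the expect_value() expectations and returns NULL. A minimal sketch of that idiom, assuming the test binary is linked with -Wl,--wrap=calloc (illustrative names only, not the project's mock implementation):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdarg.h>
    #include <setjmp.h>
    #include <cmocka.h>

    bool demo_mock_calloc = false;              // tests toggle this around the call

    void *__real_calloc(size_t nmemb, size_t size);

    void *
    __wrap_calloc(size_t nmemb, size_t size)
    {
        if (!demo_mock_calloc) {
            return __real_calloc(nmemb, size);  // behave normally when not mocking
        }
        check_expected(nmemb);                  // matched against expect_value()
        check_expected(size);
        return NULL;                            // simulate allocation failure
    }
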
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/status/set_working_set_defaults_test.c b/lib/pengine/tests/status/set_working_set_defaults_test.c
index c822278..7045a33 100644
--- a/lib/pengine/tests/status/set_working_set_defaults_test.c
+++ b/lib/pengine/tests/status/set_working_set_defaults_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,8 +10,9 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
#include <crm/pengine/status.h>
#include "mock_private.h"
@@ -19,27 +20,29 @@
static void
check_defaults(void **state) {
uint32_t flags;
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- flags = pe_flag_stop_rsc_orphans|pe_flag_symmetric_cluster|pe_flag_stop_action_orphans;
+ flags = pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions;
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- flags |= pe_flag_concurrent_fencing;
+ flags |= pcmk_sched_concurrent_fencing;
}
- assert_null(data_set->priv);
- assert_int_equal(data_set->order_id, 1);
- assert_int_equal(data_set->action_id, 1);
- assert_int_equal(data_set->no_quorum_policy, no_quorum_stop);
- assert_int_equal(data_set->flags, flags);
+ assert_null(scheduler->priv);
+ assert_int_equal(scheduler->order_id, 1);
+ assert_int_equal(scheduler->action_id, 1);
+ assert_int_equal(scheduler->no_quorum_policy, pcmk_no_quorum_stop);
+ assert_int_equal(scheduler->flags, flags);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/utils/Makefile.am b/lib/pengine/tests/utils/Makefile.am
index 4a3e8a2..64421e2 100644
--- a/lib/pengine/tests/utils/Makefile.am
+++ b/lib/pengine/tests/utils/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -14,8 +14,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/lib/pengine
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pe__cmp_node_name_test \
+check_PROGRAMS = pe__cmp_node_name_test \
pe__cmp_rsc_priority_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/utils/pe__cmp_node_name_test.c b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
index 45d87ee..4d602e4 100644
--- a/lib/pengine/tests/utils/pe__cmp_node_name_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,8 +15,8 @@
struct pe_node_shared_s node1_details;
struct pe_node_shared_s node2_details;
-pe_node_t node1 = {.details = &node1_details};
-pe_node_t node2 = {.details = &node2_details};
+pcmk_node_t node1 = { .details = &node1_details };
+pcmk_node_t node2 = { .details = &node2_details };
static void
nodes_equal(void **state)
diff --git a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
index 669e7a9..24c1731 100644
--- a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
@@ -14,8 +14,8 @@
#include "pe_status_private.h"
-pe_resource_t rsc1;
-pe_resource_t rsc2;
+pcmk_resource_t rsc1;
+pcmk_resource_t rsc2;
static void
rscs_equal(void **state)
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 2bd6707..3429d56 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -29,8 +29,8 @@ CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
- pe_resource_t *rsc; // Resource that history is for
- pe_node_t *node; // Node that history is for
+ pcmk_resource_t *rsc; // Resource that history is for
+ pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
@@ -49,43 +49,40 @@ struct action_history {
* use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
* flag is stringified more readably in log messages.
*/
-#define set_config_flag(data_set, option, flag) do { \
- const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
- if (scf_value != NULL) { \
- if (crm_is_true(scf_value)) { \
- (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } else { \
- (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } \
- } \
+#define set_config_flag(scheduler, option, flag) do { \
+ const char *scf_value = pe_pref((scheduler)->config_hash, (option)); \
+ if (scf_value != NULL) { \
+ if (crm_is_true(scf_value)) { \
+ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } else { \
+ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } \
+ } \
} while(0)
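
set_config_flag() is a multi-statement macro, which is why it keeps the do { ... } while(0) wrapper through the rename: the wrapper makes each expansion a single statement, so it nests safely under if/else. A tiny standalone illustration (hypothetical LOG_FLAG macro, not Pacemaker code):

    #include <stdio.h>

    #define LOG_FLAG(name, value) do {                  \
            printf("flag %s = %d\n", (name), (value));  \
            fflush(stdout);                             \
        } while (0)

    int
    main(void)
    {
        int enabled = 1;

        if (enabled)
            LOG_FLAG("stonith-enabled", enabled);   // expands to one statement
        else
            LOG_FLAG("stonith-enabled", 0);         // else still binds correctly
        return 0;
    }
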
-static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
- xmlNode **last_failure,
+static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
+ xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
-static void determine_remote_online_status(pe_working_set_t *data_set,
- pe_node_t *this_node);
-static void add_node_attrs(const xmlNode *xml_obj, pe_node_t *node,
- bool overwrite, pe_working_set_t *data_set);
+static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node);
+static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
+ bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
- pe_node_t *this_node,
- pe_working_set_t *data_set);
+ pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler);
-static void unpack_node_lrm(pe_node_t *node, const xmlNode *xml,
- pe_working_set_t *data_set);
+static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler);
-// Bitmask for warnings we only want to print once
-uint32_t pe_wo = 0;
-
static gboolean
-is_dangling_guest_node(pe_node_t *node)
+is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
@@ -94,7 +91,7 @@ is_dangling_guest_node(pe_node_t *node)
node->details->remote_rsc &&
node->details->remote_rsc->container == NULL &&
pcmk_is_set(node->details->remote_rsc->flags,
- pe_rsc_orphan_container_filler)) {
+ pcmk_rsc_removed_filler)) {
return TRUE;
}
@@ -104,23 +101,23 @@ is_dangling_guest_node(pe_node_t *node)
/*!
* \brief Schedule a fence action for a node
*
- * \param[in,out] data_set Current working set of cluster
- * \param[in,out] node Node to fence
- * \param[in] reason Text description of why fencing is needed
+ * \param[in,out] scheduler Scheduler data
+ * \param[in,out] node Node to fence
+ * \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider `priority-fencing-delay`
*/
void
-pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
+pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pe__is_guest_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
- if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
@@ -135,7 +132,8 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
@@ -145,12 +143,12 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"and guest resource no longer exists",
pe__node_name(node), reason);
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pe__is_remote_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc;
+ pcmk_resource_t *rsc = node->details->remote_rsc;
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pe__node_name(node), reason);
@@ -158,26 +156,26 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
node->details->remote_requires_reset = TRUE;
crm_warn("Remote node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply `priority-fencing-delay` for remote nodes
- pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
+ pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
crm_warn("Cluster node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
- pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
@@ -197,215 +195,258 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
static void
-set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
+set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
- if (!pcmk_is_set(data_set->flags, flag)) {
- result = xpath_search(data_set->input, xpath);
+ if (!pcmk_is_set(scheduler->flags, flag)) {
+ result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
- pe__set_working_set_flags(data_set, flag);
+ pe__set_working_set_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
-unpack_config(xmlNode * config, pe_working_set_t * data_set)
+unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
- data_set->config_hash = config_hash;
+ scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
- CIB_OPTIONS_FIRST, FALSE, data_set);
+ CIB_OPTIONS_FIRST, FALSE, scheduler);
- verify_pe_options(data_set->config_hash);
+ verify_pe_options(scheduler->config_hash);
- set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
- if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ set_config_flag(scheduler, "enable-startup-probes",
+ pcmk_sched_probe_resources);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
- value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
+ value = pe_pref(scheduler->config_hash, XML_ATTR_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and stonith-watchdog-timeout is nonzero");
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
- set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
+ set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
+ scheduler);
- value = pe_pref(data_set->config_hash, "stonith-timeout");
- data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
- crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
+ value = pe_pref(scheduler->config_hash, "stonith-timeout");
+ scheduler->stonith_timeout = (int) crm_parse_interval_spec(value);
+ crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
- set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
- crm_debug("STONITH of failed nodes is %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
+ set_config_flag(scheduler, "stonith-enabled", pcmk_sched_fencing_enabled);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ crm_debug("STONITH of failed nodes is enabled");
+ } else {
+ crm_debug("STONITH of failed nodes is disabled");
+ }
- data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
- if (!strcmp(data_set->stonith_action, "poweroff")) {
- pe_warn_once(pe_wo_poweroff,
+ scheduler->stonith_action = pe_pref(scheduler->config_hash,
+ "stonith-action");
+ if (!strcmp(scheduler->stonith_action, "poweroff")) {
+ pe_warn_once(pcmk__wo_poweroff,
"Support for stonith-action of 'poweroff' is deprecated "
"and will be removed in a future release (use 'off' instead)");
- data_set->stonith_action = "off";
+ scheduler->stonith_action = PCMK_ACTION_OFF;
}
- crm_trace("STONITH will %s nodes", data_set->stonith_action);
+ crm_trace("STONITH will %s nodes", scheduler->stonith_action);
- set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
- crm_debug("Concurrent fencing is %s",
- pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
+ set_config_flag(scheduler, "concurrent-fencing",
+ pcmk_sched_concurrent_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
+ crm_debug("Concurrent fencing is enabled");
+ } else {
+ crm_debug("Concurrent fencing is disabled");
+ }
- value = pe_pref(data_set->config_hash,
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
if (value) {
- data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
- crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
+ scheduler->priority_fencing_delay = crm_parse_interval_spec(value)
+ / 1000;
+ crm_trace("Priority fencing delay is %ds",
+ scheduler->priority_fencing_delay);
}
- set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
+ set_config_flag(scheduler, "stop-all-resources", pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)));
- set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ set_config_flag(scheduler, "symmetric-cluster",
+ pcmk_sched_symmetric_cluster);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
- value = pe_pref(data_set->config_hash, "no-quorum-policy");
+ value = pe_pref(scheduler->config_hash, "no-quorum-policy");
if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_ignore;
+ scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_freeze;
+ scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_demote;
+ scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
- crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
+ crm_element_value_int(scheduler->input, XML_ATTR_QUORUM_PANIC,
&do_panic);
- if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- data_set->no_quorum_policy = no_quorum_suicide;
+ if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
"fencing is disabled");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
- set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
- crm_trace("Orphan resources are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-resources",
+ pcmk_sched_stop_removed_resources);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ crm_trace("Orphan resources are stopped");
+ } else {
+ crm_trace("Orphan resources are ignored");
+ }
- set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
- crm_trace("Orphan resource actions are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-actions",
+ pcmk_sched_cancel_removed_actions);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
+ crm_trace("Orphan resource actions are stopped");
+ } else {
+ crm_trace("Orphan resource actions are ignored");
+ }
- value = pe_pref(data_set->config_hash, "remove-after-stop");
+ value = pe_pref(scheduler->config_hash, "remove-after-stop");
if (value != NULL) {
if (crm_is_true(value)) {
- pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__set_working_set_flags(scheduler, pcmk_sched_remove_after_stop);
#ifndef PCMK__COMPAT_2_0
- pe_warn_once(pe_wo_remove_after,
+ pe_warn_once(pcmk__wo_remove_after,
"Support for the remove-after-stop cluster property is"
" deprecated and will be removed in a future release");
#endif
} else {
- pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__clear_working_set_flags(scheduler,
+ pcmk_sched_remove_after_stop);
}
}
- set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
+ set_config_flag(scheduler, "maintenance-mode", pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags,
+ pcmk_sched_in_maintenance)));
- set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
- crm_trace("Start failures are %s",
- pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
+ set_config_flag(scheduler, "start-failure-is-fatal",
+ pcmk_sched_start_failure_fatal);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
+ crm_trace("Start failures are always fatal");
+ } else {
+ crm_trace("Start failures are handled by failcount");
+ }
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ set_config_flag(scheduler, "startup-fencing",
+ pcmk_sched_startup_fencing);
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
- pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
+ pe_warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes");
}
- pe__unpack_node_health_scores(data_set);
+ pe__unpack_node_health_scores(scheduler);
- data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
- crm_trace("Placement strategy: %s", data_set->placement_strategy);
+ scheduler->placement_strategy = pe_pref(scheduler->config_hash,
+ "placement-strategy");
+ crm_trace("Placement strategy: %s", scheduler->placement_strategy);
- set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
- crm_trace("Resources will%s be locked to cleanly shut down nodes",
- (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- value = pe_pref(data_set->config_hash,
+ set_config_flag(scheduler, "shutdown-lock", pcmk_sched_shutdown_lock);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
- data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
- crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
+ scheduler->shutdown_lock = crm_parse_interval_spec(value) / 1000;
+ crm_trace("Resources will be locked to nodes that were cleanly "
+ "shut down (locks expire after %s)",
+ pcmk__readable_interval(scheduler->shutdown_lock));
+ } else {
+ crm_trace("Resources will not be locked to nodes that were cleanly "
+ "shut down");
+ }
+
+ value = pe_pref(scheduler->config_hash,
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT);
+ scheduler->node_pending_timeout = crm_parse_interval_spec(value) / 1000;
+ if (scheduler->node_pending_timeout == 0) {
+ crm_trace("Do not fence pending nodes");
+ } else {
+ crm_trace("Fence pending nodes after %s",
+ pcmk__readable_interval(scheduler->node_pending_timeout
+ * 1000));
}
return TRUE;
}
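
Both interval options handled at the end of unpack_config() follow the same arithmetic: crm_parse_interval_spec() yields milliseconds, the scheduler stores whole seconds, and the value is scaled back up only for human-readable logging. A trivial standalone illustration of that round trip (made-up numbers):

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int parsed_ms = 120000;            // e.g. "2min" parsed to ms
        unsigned int stored_s = parsed_ms / 1000;   // what the scheduler keeps

        printf("stored: %us, printed as %ums\n", stored_s, stored_s * 1000);
        return 0;
    }
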
-pe_node_t *
+pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
- const char *score, pe_working_set_t * data_set)
+ const char *score, pcmk_scheduler_t *scheduler)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
- if (pe_find_node(data_set->nodes, uname) != NULL) {
+ if (pe_find_node(scheduler->nodes, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
return NULL;
}
@@ -425,14 +466,14 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
- new_node->details->data_set = data_set;
+ new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
- new_node->details->type = node_member;
+ new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
- new_node->details->type = node_remote;
- pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
+ new_node->details->type = pcmk_node_variant_remote;
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
@@ -443,7 +484,7 @@ pe_create_node(const char *id, const char *uname, const char *type,
"assuming 'ping'", pcmk__s(uname, "without name"),
type);
}
- pe_warn_once(pe_wo_ping_node,
+ pe_warn_once(pcmk__wo_ping_node,
"Support for nodes of type 'ping' (such as %s) is "
"deprecated and will be removed in a future release",
pcmk__s(uname, "unnamed node"));
@@ -464,13 +505,13 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
- data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node,
- pe__cmp_node_name);
+ scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
+ pe__cmp_node_name);
return new_node;
}
static const char *
-expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
+expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
@@ -527,9 +568,10 @@ expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data
}
static void
-handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
+handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
- if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
+ if ((new_node->details->type == pcmk_node_variant_remote)
+ && (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
@@ -537,7 +579,7 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
@@ -552,10 +594,10 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
}
gboolean
-unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
+unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
@@ -578,46 +620,48 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
"> entry in configuration without id");
continue;
}
- new_node = pe_create_node(id, uname, type, score, data_set);
+ new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
- handle_startup_fencing(data_set, new_node);
+ handle_startup_fencing(scheduler, new_node);
- add_node_attrs(xml_obj, new_node, FALSE, data_set);
+ add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
}
}
- if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
+ if (scheduler->localhost
+ && (pe_find_node(scheduler->nodes, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
- pe_create_node(data_set->localhost, data_set->localhost, NULL, 0,
- data_set);
+ pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0,
+ scheduler);
}
return TRUE;
}
static void
-setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
+setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
- g_list_foreach(rsc->children, (GFunc) setup_container, data_set);
+ g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
- pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
+ pcmk_resource_t *container = pe_find_resource(scheduler->resources,
+ container_id);
if (container) {
rsc->container = container;
- pe__set_resource_flags(container, pe_rsc_is_container);
+ pe__set_resource_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
} else {
@@ -627,7 +671,7 @@ setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -646,11 +690,12 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
new_node_id = ID(xml_obj);
/* The "pe_find_node" check is here to make sure we don't iterate over
* an expanded node that has already been added to the node list. */
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
@@ -663,12 +708,14 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* configuration for the guest node's connection, to be unpacked
* later.
*/
- new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
+ scheduler);
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
@@ -681,13 +728,15 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
xml_obj2 = pcmk__xe_next(xml_obj2)) {
- new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
+ new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
+ scheduler);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, ID(xml_obj2), ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
}
}
@@ -704,20 +753,20 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* easy access to the connection resource during the scheduler calculations.
*/
static void
-link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
+link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
- pe_node_t *remote_node = NULL;
+ pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
- remote_node = pe_find_node(data_set->nodes, new_rsc->id);
+ remote_node = pe_find_node(scheduler->nodes, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pe_rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
@@ -728,7 +777,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
- handle_startup_fencing(data_set, remote_node);
+ handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
@@ -742,7 +791,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
static void
destroy_tag(gpointer data)
{
- pe_tag_t *tag = data;
+ pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
@@ -756,7 +805,7 @@ destroy_tag(gpointer data)
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
- * \param[in,out] data_set Where to put resource information
+ * \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
@@ -764,63 +813,64 @@ destroy_tag(gpointer data)
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
-unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
+unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
- data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
+ scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
const char *id = ID(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
- crm_element_name(xml_obj));
+ xml_obj->name);
continue;
}
if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE,
pcmk__str_none)) {
- if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
/* Record the template's ID for the knowledge of its existence anyway. */
- g_hash_table_insert(data_set->template_rsc_sets, strdup(id), NULL);
+ g_hash_table_insert(scheduler->template_rsc_sets, strdup(id),
+ NULL);
}
continue;
}
crm_trace("Unpacking <%s " XML_ATTR_ID "='%s'>",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
- data_set) == pcmk_rc_ok) {
- data_set->resources = g_list_append(data_set->resources, new_rsc);
+ scheduler) == pcmk_rc_ok) {
+ scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
}
}
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
- setup_container(rsc, data_set);
- link_rsc2remotenode(data_set, rsc);
+ setup_container(rsc, scheduler);
+ link_rsc2remotenode(scheduler, rsc);
}
- data_set->resources = g_list_sort(data_set->resources,
+ scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
- } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
- && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
@@ -831,11 +881,11 @@ unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
}
gboolean
-unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
- data_set->tags = pcmk__strkey_table(free, destroy_tag);
+ scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
xml_tag = pcmk__xe_next(xml_tag)) {
@@ -849,7 +899,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
- crm_element_name(xml_tag));
+ (const char *) xml_tag->name);
continue;
}
@@ -864,11 +914,11 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
- crm_element_name(xml_obj_ref), tag_id);
+ xml_obj_ref->name, tag_id);
continue;
}
- if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
+ if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
@@ -880,7 +930,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
-unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
+unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
@@ -888,7 +938,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
ticket_id = ID(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
@@ -897,9 +947,9 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
crm_trace("Processing ticket state for %s", ticket_id);
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = ticket_new(ticket_id, data_set);
+ ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
@@ -907,7 +957,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_ticket, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
continue;
@@ -948,7 +998,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
}
static gboolean
-unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
+unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -958,19 +1008,19 @@ unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
continue;
}
- unpack_ticket_state(xml_obj, data_set);
+ unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
-unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
- pe_working_set_t *data_set)
+unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
+ pcmk_scheduler_t *scheduler)
{
const char *resource_discovery_enabled = NULL;
const xmlNode *attrs = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
return;
@@ -990,7 +1040,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
this_node->details->unseen = FALSE;
}
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
- add_node_attrs(attrs, this_node, TRUE, data_set);
+ add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pe__node_name(this_node));
@@ -1003,7 +1053,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
}
if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
- ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
+ ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pe__node_name(this_node));
this_node->details->maintenance = TRUE;
}
@@ -1011,7 +1061,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
if (pe__is_remote_node(this_node)
- && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
@@ -1033,19 +1083,19 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
* \internal
* \brief Unpack a cluster node's transient attributes
*
- * \param[in] state CIB node state XML
- * \param[in,out] node Cluster node whose attributes are being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] node Cluster node whose attributes are being unpacked
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
- pe_working_set_t *data_set)
+unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS,
FALSE);
- add_node_attrs(attrs, node, TRUE, data_set);
+ add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
crm_info("%s is in standby mode", pe__node_name(node));
@@ -1074,15 +1124,15 @@ unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
* resource history inside it. Multiple passes through the status are needed to
* fully unpack everything.
*
- * \param[in] state CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
+unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
id = crm_element_value(state, XML_ATTR_ID);
if (id == NULL) {
@@ -1093,15 +1143,21 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
uname = crm_element_value(state, XML_ATTR_UNAME);
if (uname == NULL) {
- crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
- XML_ATTR_UNAME);
- return;
+ /* If a joining peer causes the cluster to acquire quorum from Corosync
+ * before it has joined the CPG membership of pacemaker-controld, the
+ * node_state entry created for it may not have a uname yet. Recognize
+ * the node as pending and wait for it to join CPG.
+ */
+ crm_trace("Handling " XML_CIB_TAG_STATE " entry with id=\"%s\" without "
+ XML_ATTR_UNAME, id);
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
- pcmk__config_warn("Ignoring recorded node state for '%s' because "
- "it is no longer in the configuration", uname);
+ pcmk__config_warn("Ignoring recorded node state for id=\"%s\" (%s) "
+ "because it is no longer in the configuration",
+ id, pcmk__s(uname, "uname unknown"));
return;
}
@@ -1116,7 +1172,7 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
return;
}
- unpack_transient_attributes(state, this_node, data_set);
+ unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
@@ -1126,16 +1182,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
crm_trace("Determining online status of cluster node %s (id %s)",
pe__node_name(this_node), id);
- determine_online_status(state, this_node, data_set);
+ determine_online_status(state, this_node, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
- && (data_set->no_quorum_policy == no_quorum_suicide)) {
+ && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
- pe_fence_node(data_set, this_node, "cluster does not have quorum",
+ pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
@@ -1150,16 +1206,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
* in another node's history, so it might take multiple passes to unpack
* everything.
*
- * \param[in] status CIB XML status section
- * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
- * \param[in,out] data_set Cluster working set
+ * \param[in] status CIB XML status section
+ * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
@@ -1169,7 +1225,7 @@ unpack_node_history(const xmlNode *status, bool fence,
const char *id = ID(state);
const char *uname = crm_element_value(state, XML_ATTR_UNAME);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
@@ -1178,7 +1234,7 @@ unpack_node_history(const xmlNode *status, bool fence,
continue;
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
@@ -1200,10 +1256,10 @@ unpack_node_history(const xmlNode *status, bool fence,
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
- if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
- || (rsc->container->role != RSC_ROLE_STARTED)) {
+ if ((rsc == NULL) || (rsc->role != pcmk_role_started)
+ || (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
@@ -1216,11 +1272,11 @@ unpack_node_history(const xmlNode *status, bool fence,
* connection is up, with the exception of when shutdown locks are
* in use.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
- || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
- && (rsc->role != RSC_ROLE_STARTED))) {
+ || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
+ && (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
@@ -1231,8 +1287,9 @@ unpack_node_history(const xmlNode *status, bool fence,
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
- } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
- |pe_flag_shutdown_lock)
+ } else if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_fencing_enabled
+ |pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
@@ -1240,15 +1297,15 @@ unpack_node_history(const xmlNode *status, bool fence,
}
if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
- unpack_handle_remote_attrs(this_node, state, data_set);
+ determine_remote_online_status(scheduler, this_node);
+ unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
- unpack_node_lrm(this_node, state, data_set);
+ unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
@@ -1259,172 +1316,324 @@ unpack_node_history(const xmlNode *status, bool fence,
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
-unpack_status(xmlNode * status, pe_working_set_t * data_set)
+unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
- unpack_tickets_state((xmlNode *) state, data_set);
+ unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
- unpack_node_state(state, data_set);
+ unpack_node_state(state, scheduler);
}
}
- while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
+ while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_fencing_enabled),
+ scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
- if (data_set->stop_needed != NULL) {
- for (GList *item = data_set->stop_needed; item; item = item->next) {
- pe_resource_t *container = item->data;
- pe_node_t *node = pe__current_node(container);
+ if (scheduler->stop_needed != NULL) {
+ for (GList *item = scheduler->stop_needed; item; item = item->next) {
+ pcmk_resource_t *container = item->data;
+ pcmk_node_t *node = pe__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
- g_list_free(data_set->stop_needed);
- data_set->stop_needed = NULL;
+ g_list_free(scheduler->stop_needed);
+ scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *this_node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *this_node = gIter->data;
if (!pe__is_guest_or_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
- pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
+ pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
+/*!
+ * \internal
+ * \brief Unpack node's time when it became a member at the cluster layer
+ *
+ * \param[in] node_state Node's node_state entry
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Epoch time when node became a cluster member
+ * (or scheduler effective time for legacy entries) if a member,
+ * 0 if not a member, or -1 if no valid information available
+ */
+static long long
+unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
+{
+ const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
+ int member = 0;
+
+ if (member_time == NULL) {
+ return -1LL;
+
+ } else if (crm_str_to_boolean(member_time, &member) == 1) {
+ /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
+ * recorded as a boolean for a DC < 2.1.7, or the node is pending
+ * shutdown and has left the CPG, in which case it was set to 1 to avoid
+ * fencing for node-pending-timeout.
+ *
+ * We return the effective time for in_ccm=1 because what matters for
+ * avoiding fencing is whether the effective time minus this value is
+ * less than the pending node timeout.
+ */
+ return member? (long long) get_effective_time(scheduler) : 0LL;
+
+ } else {
+ long long when_member = 0LL;
+
+ if ((pcmk__scan_ll(member_time, &when_member,
+ 0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
+ " in " XML_CIB_TAG_STATE " entry", member_time);
+ return -1LL;
+ }
+ return when_member;
+ }
+}
+
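A self-contained sketch (not part of the patch) of how the three possible in_ccm encodings are interpreted, using only standard C; member_since() simplifies the real helper, which uses crm_str_to_boolean(), pcmk__scan_ll() and get_effective_time() and also accepts other boolean spellings:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Interpret a node_state in_ccm value: "true"/"false" (legacy boolean from a
 * DC < 2.1.7) or an epoch timestamp / "0" (DC >= 2.1.7). Returns the time the
 * node became a member, now for a legacy "true", 0 if not a member, or -1 if
 * the value is missing or unrecognized. */
static long long member_since(const char *in_ccm, long long now) {
    char *end = NULL;
    long long when = 0;

    if (in_ccm == NULL) {
        return -1;                          /* no information recorded */
    }
    if ((strcmp(in_ccm, "true") == 0) || (strcmp(in_ccm, "1") == 0)) {
        return now;                         /* legacy boolean: member right now */
    }
    if ((strcmp(in_ccm, "false") == 0) || (strcmp(in_ccm, "0") == 0)) {
        return 0;                           /* not a member */
    }
    when = strtoll(in_ccm, &end, 10);
    if ((end == in_ccm) || (*end != '\0') || (when < 0)) {
        return -1;                          /* unrecognized value */
    }
    return when;                            /* epoch time the node joined */
}

int main(void) {
    long long now = (long long) time(NULL);

    printf("%lld\n", member_since("true", now));        /* legacy: now */
    printf("%lld\n", member_since("0", now));           /* not a member: 0 */
    printf("%lld\n", member_since("1700000000", now));  /* timestamp as-is */
    printf("%lld\n", member_since(NULL, now));          /* unknown: -1 */
    return 0;
}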
+/*!
+ * \internal
+ * \brief Unpack node's time when it became online in process group
+ *
+ * \param[in] node_state Node's node_state entry
+ *
+ * \return Epoch time when node became online in process group (or 0 if not
+ * online, or 1 for legacy online entries)
+ */
+static long long
+unpack_node_online(const xmlNode *node_state)
+{
+ const char *peer_time = crm_element_value(node_state, PCMK__XA_CRMD);
+
+ // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
+ if (pcmk__str_eq(peer_time, OFFLINESTATUS,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ return 0LL;
+
+ } else if (pcmk__str_eq(peer_time, ONLINESTATUS, pcmk__str_casei)) {
+ return 1LL;
+
+ } else {
+ long long when_online = 0LL;
+
+ if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
+ || (when_online < 0)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_CRMD " in "
+ XML_CIB_TAG_STATE " entry, assuming offline", peer_time);
+ return 0LL;
+ }
+ return when_online;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack node attribute for user-requested fencing
+ *
+ * \param[in] node Node to check
+ * \param[in] node_state Node's node_state entry in CIB status
+ *
+ * \return \c true if fencing has been requested for \p node, otherwise \c false
+ */
+static bool
+unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
+{
+ long long value = 0LL;
+ int value_i = 0;
+ const char *value_s = pe_node_attribute_raw(node, PCMK_NODE_ATTR_TERMINATE);
+
+ // Value may be boolean or an epoch time
+ if (crm_str_to_boolean(value_s, &value_i) == 1) {
+ return (value_i != 0);
+ }
+ if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
+ return (value > 0);
+ }
+ crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
+ "node attribute for %s", value_s, pe__node_name(node));
+ return false;
+}
+
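A brief sketch (not part of the patch) of the terminate-attribute decision above: the value may be a boolean or an epoch time, and any true or positive value requests fencing. terminate_requested() is a simplified stand-in using only standard C:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return true if the node's "terminate" attribute requests fencing: a true
 * boolean or any positive number (such as an epoch timestamp) counts. */
static bool terminate_requested(const char *value) {
    char *end = NULL;
    long long ll = 0;

    if (value == NULL) {
        return false;
    }
    if (strcmp(value, "true") == 0) {       /* boolean form */
        return true;
    }
    if (strcmp(value, "false") == 0) {
        return false;
    }
    ll = strtoll(value, &end, 10);          /* numeric/epoch form */
    if ((end != value) && (*end == '\0')) {
        return (ll > 0);
    }
    return false;                           /* unrecognized: ignore */
}

int main(void) {
    printf("%d %d %d %d\n",
           terminate_requested("true"),        /* 1 */
           terminate_requested("0"),           /* 0 */
           terminate_requested("1700000000"),  /* 1 */
           terminate_requested("nonsense"));   /* 0 (real code logs a warning) */
    return 0;
}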
static gboolean
-determine_online_status_no_fencing(pe_working_set_t *data_set,
+determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
- pe_node_t *this_node)
+ pcmk_node_t *this_node)
{
gboolean online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
- if (!crm_is_true(in_cluster)) {
- crm_trace("Node is down: in_cluster=%s",
- pcmk__s(in_cluster, "<null>"));
+ if (when_member <= 0) {
+ crm_trace("Node %s is %sdown", pe__node_name(this_node),
+ ((when_member < 0)? "presumed " : ""));
- } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
+ } else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
- crm_debug("Node is not ready to run resources: %s", join);
+ crm_debug("Node %s is not ready to run resources: %s",
+ pe__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
- crm_trace("Controller is down: "
- "in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ crm_trace("Node %s controller is down: "
+ "member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
- pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
- crm_info("in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
+ crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
-static gboolean
-determine_online_status_fencing(pe_working_set_t *data_set,
- const xmlNode *node_state, pe_node_t *this_node)
+/*!
+ * \internal
+ * \brief Check whether a node has taken too long to join controller group
+ *
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node to check
+ * \param[in] when_member Epoch time when node became a cluster member
+ * \param[in] when_online Epoch time when node joined controller group
+ *
+ * \return true if node has been pending (on the way up) longer than
+ * node-pending-timeout, otherwise false
+ * \note This will also update the cluster's recheck time if appropriate.
+ */
+static inline bool
+pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ long long when_member, long long when_online)
{
- gboolean online = FALSE;
- gboolean do_terminate = FALSE;
- bool crmd_online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
- const char *terminate = pe_node_attribute_raw(this_node, "terminate");
-
-/*
- - XML_NODE_IN_CLUSTER ::= true|false
- - XML_NODE_IS_PEER ::= online|offline
- - XML_NODE_JOIN_STATE ::= member|down|pending|banned
- - XML_NODE_EXPECTED ::= member|down
-*/
+ if ((scheduler->node_pending_timeout > 0)
+ && (when_member > 0) && (when_online <= 0)) {
+ // There is a timeout on pending nodes, and node is pending
- if (crm_is_true(terminate)) {
- do_terminate = TRUE;
+ time_t timeout = when_member + scheduler->node_pending_timeout;
- } else if (terminate != NULL && strlen(terminate) > 0) {
- /* could be a time() value */
- char t = terminate[0];
-
- if (t != '0' && isdigit(t)) {
- do_terminate = TRUE;
+ if (get_effective_time(node->details->data_set) >= timeout) {
+ return true; // Node has timed out
}
+
+ // Node is pending, but still has time
+ pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
+ return false;
+}
+
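A worked example of the pending-timeout arithmetic above, as a standalone sketch with hypothetical times; the recheck-time update done by the real helper is omitted here:

#include <stdbool.h>
#include <stdio.h>

/* Return true if a node that joined the cluster layer at when_member but has
 * not yet joined the controller group (when_online <= 0) has been pending
 * longer than pending_timeout seconds, judged at time now. */
static bool pending_timed_out(long long now, long long when_member,
                              long long when_online, long long pending_timeout) {
    if ((pending_timeout > 0) && (when_member > 0) && (when_online <= 0)) {
        long long deadline = when_member + pending_timeout;

        return (now >= deadline);
    }
    return false;  /* no timeout configured, node not a member, or already online */
}

int main(void) {
    /* node-pending-timeout=120s; node became a member at t=1000 */
    printf("%d\n", pending_timed_out(1100, 1000, 0, 120));    /* 0: 20s left */
    printf("%d\n", pending_timed_out(1130, 1000, 0, 120));    /* 1: timed out */
    printf("%d\n", pending_timed_out(1130, 1000, 1050, 120)); /* 0: already online */
    return 0;
}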
+static bool
+determine_online_status_fencing(pcmk_scheduler_t *scheduler,
+ const xmlNode *node_state,
+ pcmk_node_t *this_node)
+{
+ bool termination_requested = unpack_node_terminate(this_node, node_state);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
+
+/*
+ - PCMK__XA_JOIN ::= member|down|pending|banned
+ - PCMK__XA_EXPECTED ::= member|down
- crm_trace("%s: in_cluster=%s is_peer=%s join=%s expected=%s term=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate);
+ @COMPAT with entries recorded for DCs < 2.1.7
+ - PCMK__XA_IN_CCM ::= true|false
+ - PCMK__XA_CRMD ::= online|offline
- online = crm_is_true(in_cluster);
- crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
- if (exp_state == NULL) {
- exp_state = CRMD_JOINSTATE_DOWN;
- }
+ Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
+ - PCMK__XA_IN_CCM ::= <timestamp>|0
+ Epoch time since when the node has been a cluster member. A value of 0 means
+ the node is not a cluster member.
+
+ - PCMK__XA_CRMD ::= <timestamp>|0
+ Epoch time since when the peer has been online in CPG. A value of 0 means the
+ peer is offline in CPG.
+*/
+
+ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
+ pe__node_name(this_node), when_member, when_online,
+ pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
+ (termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pe__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
- online = crmd_online;
+ return (when_online > 0);
+ }
- } else if (in_cluster == NULL) {
- pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
+ if (when_member < 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer has not been seen by the cluster", FALSE);
+ return false;
+ }
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
- pe_fence_node(data_set, this_node,
+ if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
+ pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
- } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
+ } else if (termination_requested) {
+ if ((when_member <= 0) && (when_online <= 0)
+ && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
+ crm_info("%s was fenced as requested", pe__node_name(this_node));
+ return false;
+ }
+ pe_fence_node(scheduler, this_node, "fencing was requested", false);
+
+ } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
+ pcmk__str_null_matches)) {
- if (crm_is_true(in_cluster) || crmd_online) {
+ if (pending_too_long(scheduler, this_node, when_member, when_online)) {
+ pe_fence_node(scheduler, this_node,
+ "peer pending timed out on joining the process group",
+ FALSE);
+
+ } else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pe__node_name(this_node));
this_node->details->standby = TRUE;
@@ -1435,48 +1644,41 @@ determine_online_status_fencing(pe_working_set_t *data_set,
pe__node_name(this_node));
}
- } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
- && crm_is_true(in_cluster) == FALSE && !crmd_online) {
- crm_info("%s was just shot", pe__node_name(this_node));
- online = FALSE;
-
- } else if (crm_is_true(in_cluster) == FALSE) {
+ } else if (when_member <= 0) {
// Consider `priority-fencing-delay` for lost nodes
- pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
+ pe_fence_node(scheduler, this_node,
+ "peer is no longer part of the cluster", TRUE);
- } else if (!crmd_online) {
- pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
+ } else if (when_online <= 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
- } else if (do_terminate) {
- pe_fence_node(data_set, this_node, "termination was requested", FALSE);
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pe__node_name(this_node));
- } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
+ } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
+ CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources", pe__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
- pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
- crm_warn("%s: in-cluster=%s is-peer=%s join=%s expected=%s term=%d shutdown=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate,
- this_node->details->shutdown);
+ pe_fence_node(scheduler, this_node, "peer was in an unknown state",
+ FALSE);
}
- return online;
+ return (when_member > 0);
}
static void
-determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
+determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node)
{
- pe_resource_t *rsc = this_node->details->remote_rsc;
- pe_resource_t *container = NULL;
- pe_node_t *host = NULL;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *container = NULL;
+ pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
@@ -1494,33 +1696,36 @@ determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_nod
}
/* If the resource is currently started, mark it online. */
- if (rsc->role == RSC_ROLE_STARTED) {
+ if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
- if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
+ if ((rsc->role == pcmk_role_started)
+ && (rsc->next_role == pcmk_role_stopped)) {
+
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
- if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
+ if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
- } else if (rsc->role == RSC_ROLE_STOPPED
- || (container && container->role == RSC_ROLE_STOPPED)) {
+ } else if ((rsc->role == pcmk_role_stopped)
+ || ((container != NULL)
+ && (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
@@ -1541,11 +1746,11 @@ remote_online_done:
}
static void
-determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
- pe_working_set_t *data_set)
+determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
@@ -1566,11 +1771,13 @@ determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
* Anyone caught abusing this logic will be shot
*/
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- online = determine_online_status_no_fencing(data_set, node_state, this_node);
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ online = determine_online_status_no_fencing(scheduler, node_state,
+ this_node);
} else {
- online = determine_online_status_fencing(data_set, node_state, this_node);
+ online = determine_online_status_fencing(scheduler, node_state,
+ this_node);
}
if (online) {
@@ -1692,30 +1899,30 @@ clone_zero(const char *last_rsc_id)
return zero;
}
-static pe_resource_t *
+static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
copy_in_properties(xml_rsc, rsc_entry);
crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
- if (pe__unpack_resource(xml_rsc, &rsc, NULL, data_set) != pcmk_rc_ok) {
+ if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
- pe_node_t *node;
+ pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
- node = pe_find_node(data_set->nodes, rsc_id);
+ node = pe_find_node(scheduler->nodes, rsc_id);
if (node == NULL) {
- node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
+ node = pe_create_node(rsc_id, rsc_id, "remote", NULL, scheduler);
}
- link_rsc2remotenode(data_set, rsc);
+ link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
@@ -1726,10 +1933,10 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
- pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed_filler);
}
- pe__set_resource_flags(rsc, pe_rsc_orphan);
- data_set->resources = g_list_append(data_set->resources, rsc);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed);
+ scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
@@ -1737,21 +1944,22 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
- * \param[in,out] parent Clone resource that orphan will be added to
- * \param[in] rsc_id Orphan's resource ID
- * \param[in] node Where orphan is active (for logging only)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] parent Clone resource that orphan will be added to
+ * \param[in] rsc_id Orphan's resource ID
+ * \param[in] node Where orphan is active (for logging only)
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
-static pe_resource_t *
-create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
- const pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_resource_t *
+create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *top = pe__create_clone_child(parent, data_set);
+ pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
// find_rsc() because we might be a cloned group
- pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
+ pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pe__node_name(node));
@@ -1767,30 +1975,30 @@ create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
* (2) an inactive instance (i.e. within the total of clone-max instances);
* (3) a newly created orphan (i.e. clone-max instances are already active).
*
- * \param[in,out] data_set Cluster information
- * \param[in] node Node on which to check for instance
- * \param[in,out] parent Clone to check
- * \param[in] rsc_id Name of cloned resource in history (without instance)
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node on which to check for instance
+ * \param[in,out] parent Clone to check
+ * \param[in] rsc_id Name of cloned resource in history (no instance)
*/
-static pe_resource_t *
-find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
- pe_resource_t *parent, const char *rsc_id)
+static pcmk_resource_t *
+find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
- pe_resource_t *rsc = NULL;
- pe_resource_t *inactive_instance = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(parent != NULL);
CRM_ASSERT(pe_rsc_is_clone(parent));
- CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
+ CRM_ASSERT(!pcmk_is_set(parent->flags, pcmk_rsc_unique));
// Check for active (or partially active, for cloned groups) instance
pe_rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pe__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
- pe_resource_t *child = rIter->data;
+ pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
@@ -1804,8 +2012,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if globally-unique
* was flipped from true to false); and
- * (3) when we re-run calculations on the same data set as part of a
- * simulation.
+ * (3) when we re-run calculations on the same scheduler data as part of
+ * a simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
@@ -1815,7 +2023,7 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*/
CRM_LOG_ASSERT(locations->next == NULL);
- if (((pe_node_t *)locations->data)->details == node->details) {
+ if (((pcmk_node_t *) locations->data)->details == node->details) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
@@ -1823,7 +2031,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*
* If the history entry is orphaned, rsc will be NULL.
*/
- rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
+ rsc = parent->fns->find_rsc(child, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
@@ -1848,10 +2057,10 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
} else {
pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
- && !pcmk_is_set(child->flags, pe_rsc_block)) {
+ && !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
- pe_find_clone);
+ pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
@@ -1881,30 +2090,30 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pe__is_guest_node(node)
- && !pe__is_universal_clone(parent, data_set)) {
+ && !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
- rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
+ rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
-static pe_resource_t *
-unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
+static pcmk_resource_t *
+unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
- pe_resource_t *rsc = NULL;
- pe_resource_t *parent = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
- rsc = pe_find_resource(data_set->resources, rsc_id);
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
@@ -1912,9 +2121,10 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
* a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
- pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
+ pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
+ clone0_id);
- if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
+ if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
@@ -1924,7 +2134,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
}
free(clone0_id);
- } else if (rsc->variant > pe_native) {
+ } else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
@@ -1940,7 +2150,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
} else {
char *base = clone_strip(rsc_id);
- rsc = find_anonymous_clone(data_set, node, parent, base);
+ rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
@@ -1952,42 +2162,43 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
pcmk__str_update(&rsc->clone_name, rsc_id);
pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pe__node_name(node), rsc->id,
- (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
+ (pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : ""));
}
return rsc;
}
-static pe_resource_t *
-process_orphan_resource(const xmlNode *rsc_entry, const pe_node_t *node,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node));
- rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
+ rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
- if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
- resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
+ resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__",
+ scheduler);
}
return rsc;
}
static void
-process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
+process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
- pe_node_t *tmpnode = NULL;
+ pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
- enum action_fail_response save_on_fail = action_fail_ignore;
+ enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
@@ -1995,12 +2206,12 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
fail2text(on_fail));
/* process current state */
- if (rsc->role != RSC_ROLE_UNKNOWN) {
- pe_resource_t *iter = rsc;
+ if (rsc->role != pcmk_role_unknown) {
+ pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
- pe_node_t *n = pe__copy_node(node);
+ pcmk_node_t *n = pe__copy_node(node);
pe_rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
@@ -2009,7 +2220,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
pe__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
- if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
iter = iter->parent;
@@ -2017,10 +2228,10 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* If a managed resource is believed to be running, but node is down ... */
- if (rsc->role > RSC_ROLE_STOPPED
+ if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
- && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
@@ -2032,12 +2243,15 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* resource to run again once we are sure we know its state.
*/
if (pe__is_guest_node(node)) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
if (pe__is_remote_node(node) && node->details->remote_rsc
- && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
+ && !pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
@@ -2070,20 +2284,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
- on_fail = action_fail_ignore;
+ on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
/* nothing to do */
break;
- case action_fail_demote:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
@@ -2092,20 +2306,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
free(reason);
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
@@ -2113,19 +2327,22 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->cluster);
break;
- case action_fail_stop:
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
+ case pcmk_on_fail_stop:
+ pe__set_next_role(rsc, pcmk_role_stopped, "on-fail=stop");
break;
- case action_fail_recover:
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart:
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_restart_container:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart_container:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (rsc->container && pe_rsc_is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
@@ -2136,14 +2353,16 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
g_list_prepend(rsc->cluster->stop_needed, rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
- } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ } else if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_reset_remote:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ case pcmk_on_fail_reset_remote:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
@@ -2161,14 +2380,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* Require the stop action regardless of whether fencing is occurring. */
- if (rsc->role > RSC_ROLE_STOPPED) {
+ if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
+ pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
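
The switch above turns each on-fail policy into a concrete scheduler reaction: ignore, demote in place, fence or standby the node, block or ban the resource, force a stop, restart the resource (or its container), or reset a remote connection. A standalone sketch of that dispatch shape, with a placeholder enum and printf() calls standing in for the real flag-setting and action-creation calls:

/* Standalone sketch of an on-fail policy dispatch; the enum and the
 * printf "reactions" are illustrative placeholders, not Pacemaker APIs. */
#include <stdio.h>

enum on_fail_policy {
    OF_IGNORE, OF_DEMOTE, OF_FENCE, OF_STANDBY,
    OF_BLOCK, OF_BAN, OF_STOP, OF_RESTART
};

static void
react_to_failure(enum on_fail_policy policy, const char *rsc, const char *node)
{
    switch (policy) {
        case OF_IGNORE:
            break;                                  /* leave state as-is */
        case OF_DEMOTE:
            printf("demote %s on %s\n", rsc, node); /* demote, don't stop */
            break;
        case OF_FENCE:
            printf("fence %s\n", node);             /* mark node unclean */
            break;
        case OF_STANDBY:
            printf("standby %s\n", node);           /* evacuate the node */
            break;
        case OF_BLOCK:
            printf("block %s\n", rsc);              /* stop managing it */
            break;
        case OF_BAN:
            printf("ban %s from %s\n", rsc, node);  /* forbid this node */
            break;
        case OF_STOP:
        case OF_RESTART:
            printf("stop %s on %s\n", rsc, node);   /* recover via stop */
            break;
    }
}

int main(void)
{
    react_to_failure(OF_BAN, "my-rsc", "node1");
    return 0;
}
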
@@ -2177,16 +2396,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* to be fenced. By setting unseen = FALSE, the remote-node failure will
* result in a fencing operation regardless if we're going to attempt to
* reconnect to the remote-node in this transition or not. */
- if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__config_warn("Detected active orphan %s running on %s",
rsc->id, pe__node_name(node));
} else {
@@ -2198,16 +2418,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
native_add_running(rsc, node, rsc->cluster,
- (save_on_fail != action_fail_ignore));
+ (save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
break;
- case action_fail_demote:
- case action_fail_block:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_block:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
break;
default:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
@@ -2220,14 +2441,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->clone_name = NULL;
} else {
- GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
- FALSE);
+ GList *possible_matches = pe__resource_actions(rsc, node,
+ PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *stop = (pe_action_t *) gIter->data;
+ pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
- pe__set_action_flags(stop, pe_action_optional);
+ pe__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
@@ -2236,21 +2457,21 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
- if (rsc->role == RSC_ROLE_STOPPED
+ if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
-process_recurring(pe_node_t * node, pe_resource_t * rsc,
+process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
- GList *sorted_op_list, pe_working_set_t * data_set)
+ GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
@@ -2303,7 +2524,7 @@ process_recurring(pe_node_t * node, pe_resource_t * rsc,
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node));
- custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
+ custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
@@ -2328,20 +2549,24 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
- } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ } else if ((implied_monitor_start <= *stop_index)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
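
calculate_active_ops() walks the resource's operation history in call-id order and remembers the index of the last successful stop and the last start (or migrate_from), so later code can treat only the operations after the last stop as still active; successful monitors and promote/demote results can also imply a start, which the sketch below omits. A minimal standalone model of the index bookkeeping, using a hypothetical op_record array instead of lrm_rsc_op XML:

/* Standalone model of the start/stop index bookkeeping; op_record is a
 * hypothetical stand-in for sorted lrm_rsc_op history entries. */
#include <stdio.h>
#include <string.h>

struct op_record { const char *task; int rc; };

static void
active_op_indices(const struct op_record *ops, int n,
                  int *start_index, int *stop_index)
{
    *start_index = -1;
    *stop_index = -1;
    for (int i = 0; i < n; i++) {
        if ((strcmp(ops[i].task, "stop") == 0) && (ops[i].rc == 0)) {
            *stop_index = i;        /* last successful stop */
        } else if ((strcmp(ops[i].task, "start") == 0)
                   || (strcmp(ops[i].task, "migrate_from") == 0)) {
            *start_index = i;       /* last (re)start on this node */
        }
    }
}

int main(void)
{
    struct op_record ops[] = {
        { "start", 0 }, { "monitor", 0 }, { "stop", 0 }, { "start", 0 },
    };
    int start = -1, stop = -1;

    active_op_indices(ops, (int) (sizeof(ops) / sizeof(ops[0])),
                      &start, &stop);
    printf("start=%d stop=%d\n", start, stop);   /* start=3 stop=2 */
    return 0;
}
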
@@ -2357,26 +2582,26 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
// If resource history entry has shutdown lock, remember lock node and time
static void
-unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
- if ((data_set->shutdown_lock > 0)
- && (get_effective_time(data_set)
- > (lock_time + data_set->shutdown_lock))) {
+ if ((scheduler->shutdown_lock > 0)
+ && (get_effective_time(scheduler)
+ > (lock_time + scheduler->shutdown_lock))) {
pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pe__node_name(node));
- pe__clear_resource_history(rsc, node, data_set);
+ pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
- rsc->lock_node = (pe_node_t *) node;
+ rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
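
unpack_shutdown_lock() treats a lock as expired once the effective time passes the node's shutdown time plus the configured shutdown-lock limit, and otherwise records the lock node and time on the resource. The expiry test reduces to simple time arithmetic, sketched here with arbitrary values:

/* Sketch of the shutdown-lock expiry check, using plain time_t arithmetic
 * and made-up values rather than real scheduler state. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool
shutdown_lock_expired(time_t lock_time, time_t lock_limit, time_t now)
{
    /* A limit of 0 never triggers expiry here (lock held until cleared). */
    return (lock_limit > 0) && (now > (lock_time + lock_limit));
}

int main(void)
{
    time_t shut_down_at = 1000;

    printf("%d\n", shutdown_lock_expired(shut_down_at, 600, 1700)); /* 1 */
    printf("%d\n", shutdown_lock_expired(shut_down_at, 600, 1500)); /* 0 */
    return 0;
}
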
@@ -2388,30 +2613,30 @@ unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] rsc_entry lrm_resource XML being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
-static pe_resource_t *
-unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
+ pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
- enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = ID(lrm_resource);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
- enum action_fail_response on_fail = action_fail_ignore;
- enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
+ enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
@@ -2428,7 +2653,7 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
op_list = g_list_prepend(op_list, rsc_op);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
@@ -2436,25 +2661,25 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
}
/* find the resource */
- rsc = unpack_find_resource(data_set, node, rsc_id);
+ rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
- rsc = process_orphan_resource(lrm_resource, node, data_set);
+ rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
- rsc->role = RSC_ROLE_UNKNOWN;
+ rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
@@ -2465,7 +2690,8 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
- process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
+ process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
+ scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
@@ -2473,7 +2699,9 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
- if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
+ if ((rsc->next_role == pcmk_role_unknown)
+ || (req_role < rsc->next_role)) {
+
pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
@@ -2492,13 +2720,13 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
- pe_resource_t *rsc;
- pe_resource_t *container;
+ pcmk_resource_t *rsc;
+ pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
@@ -2512,15 +2740,14 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
continue;
}
- container = pe_find_resource(data_set->resources, container_id);
+ container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
- rsc = pe_find_resource(data_set->resources, rsc_id);
- if (rsc == NULL ||
- !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
- rsc->container != NULL) {
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
+ if ((rsc == NULL) || (rsc->container != NULL)
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
@@ -2535,12 +2762,13 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
* \internal
* \brief Unpack one node's lrm status section
*
- * \param[in,out] node Node whose status is being unpacked
- * \param[in] xml CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] node Node whose status is being unpacked
+ * \param[in] xml CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
+unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
@@ -2558,10 +2786,10 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
for (const xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
- pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
+ pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
- && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
@@ -2570,26 +2798,26 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
- handle_orphaned_container_fillers(xml, data_set);
+ handle_orphaned_container_fillers(xml, scheduler);
}
}
static void
-set_active(pe_resource_t * rsc)
+set_active(pcmk_resource_t *rsc)
{
- const pe_resource_t *top = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
- if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
- rsc->role = RSC_ROLE_UNPROMOTED;
+ if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
+ rsc->role = pcmk_role_unpromoted;
} else {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
- pe_node_t *node = value;
+ pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
@@ -2604,7 +2832,7 @@ set_node_score(gpointer key, gpointer value, gpointer user_data)
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
- int target_rc, pe_working_set_t *data_set)
+ int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2620,12 +2848,13 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
NULL);
/* Need to check against transition_magic too? */
- if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATE) == 0)) {
+ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']",
NULL);
- } else if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATED) == 0)) {
+ } else if ((source != NULL)
+ && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']",
NULL);
@@ -2633,7 +2862,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
g_string_append_c(xpath, ']');
}
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
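
find_lrm_op() assembles its query incrementally with a GLib GString before handing it to the XPath lookup. A small self-contained example of that pattern; the element and attribute names below are illustrative stand-ins rather than the exact schema constants used above:

/* Self-contained GString example (compile with:
 * cc example.c $(pkg-config --cflags --libs glib-2.0)). */
#include <glib.h>
#include <stdio.h>

int main(void)
{
    const char *node = "node1";
    const char *rsc = "my-rsc";
    const char *op = "monitor";
    GString *xpath = g_string_new("//node_state[@uname='");

    g_string_append(xpath, node);
    g_string_append(xpath, "']//lrm_resource[@id='");
    g_string_append(xpath, rsc);
    g_string_append(xpath, "']/lrm_rsc_op[@operation='");
    g_string_append(xpath, op);
    g_string_append(xpath, "']");

    printf("%s\n", xpath->str);
    g_string_free(xpath, TRUE);
    return 0;
}
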
@@ -2652,7 +2881,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2665,7 +2894,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']",
NULL);
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
@@ -2682,7 +2911,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
* \return true if \p rsc_id is unknown on \p node_name, otherwise false
*/
static bool
-unknown_on_node(pe_resource_t *rsc, const char *node_name)
+unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
@@ -2708,20 +2937,20 @@ unknown_on_node(pe_resource_t *rsc, const char *node_name)
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
 * \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
- xmlNode *monitor = find_lrm_op(rsc_id, CRMD_ACTION_STATUS, node_name,
- NULL, PCMK_OCF_NOT_RUNNING, data_set);
+ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
+ NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
@@ -2730,22 +2959,22 @@ monitor_not_running_after(const char *rsc_id, const char *node_name,
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] xml_op Event that non-monitor is being compared to
- * \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] xml_op Event that non-monitor is being compared to
+ * \param[in] same_node Whether the operations are on the same node
+ * \param[in,out] scheduler Scheduler data
*
 * \return true if such an operation happened after the event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
- lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
+ lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
@@ -2760,8 +2989,9 @@ non_monitor_after(const char *rsc_id, const char *node_name,
task = crm_element_value(op, XML_LRM_ATTR_TASK);
- if (pcmk__str_any_of(task, CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
@@ -2774,11 +3004,11 @@ non_monitor_after(const char *rsc_id, const char *node_name,
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] migrate_to Any migrate_to event that is being compared to
- * \param[in] migrate_from Any migrate_from event that is being compared to
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] migrate_to Any migrate_to event that is being compared to
+ * \param[in] migrate_from Any migrate_from event that is being compared to
+ * \param[in,out] scheduler Scheduler data
*
 * \return true if such an operation happened after the event, false otherwise
*/
@@ -2786,7 +3016,7 @@ static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
@@ -2826,9 +3056,9 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
- return non_monitor_after(rsc_id, node_name, xml_op, same_node, data_set)
+ return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
- data_set);
+ scheduler);
}
/*!
@@ -2844,8 +3074,8 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* \return Standard Pacemaker return code
*/
static int
-get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
- const pe_node_t *target_node,
+get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
+ const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE);
@@ -2891,11 +3121,11 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
* \param[in] node Migration source
*/
static void
-add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
+add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pe_rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pe__node_name(node));
- rsc->role = RSC_ROLE_STOPPED;
+ rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
@@ -2942,7 +3172,7 @@ unpack_migrate_to_success(struct action_history *history)
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
- pe_node_t *target_node = NULL;
+ pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
@@ -2961,8 +3191,8 @@ unpack_migrate_to_success(struct action_history *history)
true, history->rsc->cluster);
// Check for a migrate_from action from this source on the target
- migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED, target,
- source, -1, history->rsc->cluster);
+ migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
+ target, source, -1, history->rsc->cluster);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
@@ -2998,7 +3228,7 @@ unpack_migrate_to_success(struct action_history *history)
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
target_node = pe_find_node(history->rsc->cluster->nodes, target);
active_on_target = !target_newer_state && (target_node != NULL)
@@ -3010,8 +3240,9 @@ unpack_migrate_to_success(struct action_history *history)
TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
@@ -3028,8 +3259,8 @@ unpack_migrate_to_success(struct action_history *history)
}
if (active_on_target) {
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
@@ -3046,8 +3277,9 @@ unpack_migrate_to_success(struct action_history *history)
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
}
@@ -3073,12 +3305,12 @@ unpack_migrate_to_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
- target_migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED,
- target, source, PCMK_OCF_OK,
- history->rsc->cluster);
+ target_migrate_from = find_lrm_op(history->rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, target, source,
+ PCMK_OCF_OK, history->rsc->cluster);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
@@ -3096,8 +3328,8 @@ unpack_migrate_to_failure(struct action_history *history)
* active there.
* (if it is up).
*/
- pe_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
- target);
+ pcmk_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
+ target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
@@ -3140,10 +3372,10 @@ unpack_migrate_from_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
- source_migrate_to = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATE,
+ source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK,
history->rsc->cluster);
@@ -3162,8 +3394,8 @@ unpack_migrate_from_failure(struct action_history *history)
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, history->rsc->cluster,
@@ -3250,38 +3482,38 @@ static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
- case action_fail_demote:
+ case pcmk_on_fail_demote:
switch (second) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
return 1;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return 1;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
@@ -3292,26 +3524,26 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
break;
}
switch (second) {
- case action_fail_demote:
- return (first == action_fail_ignore)? -1 : 1;
+ case pcmk_on_fail_demote:
+ return (first == pcmk_on_fail_ignore)? -1 : 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
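
cmp_on_fail() imposes a severity ordering on failure-handling policies so that, when several failed actions are unpacked for the same resource, the most severe requested handling wins. The same idea can be expressed with an explicit rank table; the ranking below is illustrative and lumps the remaining policies (block, stop, ban, standby, fence) into one bucket rather than reproducing Pacemaker's exact ordering:

/* Sketch of a severity comparison for failure policies; the rank table
 * is a simplified assumption, not the real cmp_on_fail() ordering. */
#include <stdio.h>

enum policy { P_IGNORE, P_DEMOTE, P_RESTART, P_RESET_REMOTE,
              P_RESTART_CONTAINER, P_OTHER };

static int
severity(enum policy p)
{
    switch (p) {
        case P_IGNORE:            return 0;
        case P_DEMOTE:            return 1;
        case P_RESTART:           return 2;
        case P_RESET_REMOTE:      return 3;
        case P_RESTART_CONTAINER: return 4;
        default:                  return 5;  /* block/stop/ban/standby/fence */
    }
}

/* Negative if first is less severe, zero if equal, positive otherwise */
static int
cmp_policy(enum policy first, enum policy second)
{
    return severity(first) - severity(second);
}

int main(void)
{
    printf("%d\n", cmp_policy(P_DEMOTE, P_RESET_REMOTE) < 0);  /* prints 1 */
    return 0;
}
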
@@ -3331,13 +3563,13 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
* \param[in,out] rsc Resource to ban
*/
static void
-ban_from_all_nodes(pe_resource_t *rsc)
+ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -INFINITY;
- pe_resource_t *fail_rsc = rsc;
+ pcmk_resource_t *fail_rsc = rsc;
if (fail_rsc->parent != NULL) {
- pe_resource_t *parent = uber_parent(fail_rsc);
+ pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pe_rsc_is_anon_clone(parent)) {
/* For anonymous clones, if an operation with on-fail=stop fails for
@@ -3358,18 +3590,50 @@ ban_from_all_nodes(pe_resource_t *rsc)
/*!
* \internal
+ * \brief Get configured failure handling and role after failure for an action
+ *
+ * \param[in,out] history Unpacked action history entry
+ * \param[out] on_fail Where to set configured failure handling
+ * \param[out] fail_role Where to set role after failure
+ */
+static void
+unpack_failure_handling(struct action_history *history,
+ enum action_fail_response *on_fail,
+ enum rsc_role_e *fail_role)
+{
+ xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
+ history->interval_ms, true);
+
+ GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
+ history->task,
+ history->interval_ms, config);
+
+ const char *on_fail_str = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+
+ *on_fail = pcmk__parse_on_fail(history->rsc, history->task,
+ history->interval_ms, on_fail_str);
+ *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
+ meta);
+ g_hash_table_destroy(meta);
+}
+
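
The new unpack_failure_handling() helper derives the configured failure handling and post-failure role directly from the operation's meta-attributes (via pcmk__find_action_config() and pcmk__unpack_action_meta()), instead of building and freeing a throwaway action as the removed get_action_on_fail() did further below. The string-to-policy step it relies on can be sketched roughly as follows; the enum, the names, and the default are simplified assumptions, not the real pcmk__parse_on_fail() behavior:

/* Sketch of turning a configured on-fail string into a policy value. */
#include <stdio.h>
#include <string.h>

enum policy {
    P_IGNORE, P_BLOCK, P_DEMOTE, P_STOP, P_RESTART,
    P_STANDBY, P_FENCE, P_RESTART_CONTAINER
};

static enum policy
parse_on_fail(const char *value)
{
    if (value == NULL) {
        /* Real defaults depend on the operation and cluster options;
         * assume restart here for the sketch. */
        return P_RESTART;
    }
    if (strcmp(value, "ignore") == 0)  return P_IGNORE;
    if (strcmp(value, "block") == 0)   return P_BLOCK;
    if (strcmp(value, "demote") == 0)  return P_DEMOTE;
    if (strcmp(value, "stop") == 0)    return P_STOP;
    if (strcmp(value, "standby") == 0) return P_STANDBY;
    if (strcmp(value, "fence") == 0)   return P_FENCE;
    return P_RESTART;                  /* "restart" and anything unknown */
}

int main(void)
{
    printf("%d\n", parse_on_fail("demote") == P_DEMOTE);   /* prints 1 */
    return 0;
}
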
+/*!
+ * \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
- * \param[in,out] history Parsed action result history
- * \param[out] last_failure Set this to action XML
- * \param[in,out] on_fail What should be done about the result
+ * \param[in,out] history Parsed action result history
+ * \param[in] config_on_fail Action failure handling from configuration
+ * \param[in] fail_role Resource's role after failure of this action
+ * \param[out] last_failure This will be set to the history XML
+ * \param[in,out] on_fail Actual handling of action result
*/
static void
-unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
+unpack_rsc_op_failure(struct action_history *history,
+ enum action_fail_response config_on_fail,
+ enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
- pe_action_t *action = NULL;
char *last_change_s = NULL;
*last_failure = history->xml;
@@ -3377,7 +3641,7 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
- if (!pcmk_is_set(history->rsc->cluster->flags, pe_flag_symmetric_cluster)
+ if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
@@ -3414,36 +3678,34 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
free(last_change_s);
- action = custom_action(history->rsc, strdup(history->key), history->task,
- NULL, TRUE, FALSE, history->rsc->cluster);
- if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
- pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s (%s)",
- fail2text(*on_fail), fail2text(action->on_fail),
- action->uuid, history->key);
- *on_fail = action->on_fail;
+ if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
+ pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s",
+ fail2text(*on_fail), fail2text(config_on_fail),
+ history->key);
+ *on_fail = config_on_fail;
}
- if (strcmp(history->task, CRMD_ACTION_STOP) == 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -INFINITY,
"__stop_fail__", history->rsc->cluster);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATE) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATED) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if (strcmp(history->task, CRMD_ACTION_DEMOTE) == 0) {
- if (action->on_fail == action_fail_block) {
- history->rsc->role = RSC_ROLE_PROMOTED;
- pe__set_next_role(history->rsc, RSC_ROLE_STOPPED,
+ } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
+ if (config_on_fail == pcmk_on_fail_block) {
+ history->rsc->role = pcmk_role_promoted;
+ pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with on-fail=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
@@ -3451,16 +3713,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pe_rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
@@ -3469,18 +3731,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
"Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
history->rsc->id, role2text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
- fail2text(action->on_fail), role2text(action->fail_role));
+ fail2text(config_on_fail), role2text(fail_role));
- if ((action->fail_role != RSC_ROLE_STARTED)
- && (history->rsc->next_role < action->fail_role)) {
- pe__set_next_role(history->rsc, action->fail_role, "failure");
+ if ((fail_role != pcmk_role_started)
+ && (history->rsc->next_role < fail_role)) {
+ pe__set_next_role(history->rsc, fail_role, "failure");
}
- if (action->fail_role == RSC_ROLE_STOPPED) {
+ if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
-
- pe_free_action(action);
}
/*!
@@ -3497,7 +3757,7 @@ block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
- if (strcmp(history->task, CRMD_ACTION_STOP) != 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
@@ -3516,8 +3776,8 @@ block_if_unrecoverable(struct action_history *history)
free(last_change_s);
- pe__clear_resource_flags(history->rsc, pe_rsc_managed);
- pe__set_resource_flags(history->rsc, pe_rsc_block);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
@@ -3556,8 +3816,8 @@ remap_because(struct action_history *history, const char **why, int value,
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
- * the operation will be recorded in the data set's list of failed operations
- * to highlight it for the user.
+ * the operation will be recorded in the scheduler data's list of failed
+ * operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
@@ -3664,16 +3924,16 @@ remap_operation(struct action_history *history,
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
- || !pcmk_is_set(history->rsc->flags, pe_rsc_managed)) {
+ || !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
- history->rsc->role = RSC_ROLE_STOPPED;
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ history->rsc->role = pcmk_role_stopped;
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
@@ -3692,13 +3952,13 @@ remap_operation(struct action_history *history,
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
@@ -3765,16 +4025,15 @@ remap_done:
// return TRUE if start or monitor last failure but parameters changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
- pe_resource_t *rsc, pe_node_t *node)
+ pcmk_resource_t *rsc, pcmk_node_t *node)
{
- if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
-
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
- pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
+ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->cluster);
} else {
@@ -3783,13 +4042,13 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->cluster);
switch (digest_data->rc) {
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pe__xe_history_key(xml_op),
node->details->id);
break;
- case RSC_DIGEST_MATCH:
+ case pcmk__digest_match:
break;
default:
return TRUE;
@@ -3801,21 +4060,21 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
// Order action after fencing of remote node, given connection rsc
static void
-order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
- pe_working_set_t *data_set)
+order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
+ pcmk_scheduler_t *scheduler)
{
- pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
+ pcmk_node_t *remote_node = pe_find_node(scheduler->nodes, remote_conn->id);
if (remote_node) {
- pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
- FALSE, data_set);
+ pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
+ FALSE, scheduler);
- order_actions(fence, action, pe_order_implies_then);
+ order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
static bool
-should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
+should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
@@ -3839,10 +4098,11 @@ should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
- && pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
- && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ && pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
+ && (interval_ms != 0)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
- pe_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
+ pcmk_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
@@ -3909,7 +4169,8 @@ check_operation_expiry(struct action_history *history)
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
- &last_failure, pe_fc_effective,
+ &last_failure,
+ pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
@@ -3920,13 +4181,14 @@ check_operation_expiry(struct action_history *history)
history->rsc->failure_timeout, (long long) last_failure);
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
- pe__update_recheck_time(last_failure, history->rsc->cluster);
+ pe__update_recheck_time(last_failure, history->rsc->cluster,
+ "fail count expiration");
}
}
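
When a fail count has not yet expired, the block above schedules a cluster recheck for the first moment it could expire (the failure's timestamp plus failure-timeout, plus one second). The same arithmetic with made-up values:

/* Sketch of the fail-count expiry arithmetic; the timestamps are arbitrary
 * and the printout stands in for pe__update_recheck_time(). */
#include <stdio.h>
#include <time.h>

int main(void)
{
    time_t now = 1000000;
    time_t last_failure = 999700;       /* failure recorded 300s ago */
    time_t failure_timeout = 600;       /* seconds */
    time_t expires_at = last_failure + failure_timeout + 1;

    if (now < expires_at) {
        printf("not expired; recheck at %lld\n", (long long) expires_at);
    } else {
        printf("fail count expired\n");
    }
    return 0;
}
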
if (expired) {
- if (pe_get_failcount(history->node, history->rsc, NULL, pe_fc_default,
- history->xml)) {
+ if (pe_get_failcount(history->node, history->rsc, NULL,
+ pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
@@ -3963,12 +4225,14 @@ check_operation_expiry(struct action_history *history)
}
if (clear_reason != NULL) {
+ pcmk_action_t *clear_op = NULL;
+
// Schedule clearing of the fail count
- pe_action_t *clear_op = pe__clear_failcount(history->rsc, history->node,
- clear_reason,
- history->rsc->cluster);
+ clear_op = pe__clear_failcount(history->rsc, history->node,
+ clear_reason, history->rsc->cluster);
- if (pcmk_is_set(history->rsc->cluster->flags, pe_flag_stonith_enabled)
+ if (pcmk_is_set(history->rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
@@ -3987,7 +4251,7 @@ check_operation_expiry(struct action_history *history)
}
if (expired && (history->interval_ms == 0)
- && pcmk__str_eq(history->task, CRMD_ACTION_STATUS, pcmk__str_none)) {
+ && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
@@ -4022,27 +4286,6 @@ pe__target_rc_from_xml(const xmlNode *xml_op)
/*!
* \internal
- * \brief Get the failure handling for an action
- *
- * \param[in,out] history Parsed action history entry
- *
- * \return Failure handling appropriate to action
- */
-static enum action_fail_response
-get_action_on_fail(struct action_history *history)
-{
- enum action_fail_response result = action_fail_recover;
- pe_action_t *action = custom_action(history->rsc, strdup(history->key),
- history->task, NULL, TRUE, FALSE,
- history->rsc->cluster);
-
- result = action->on_fail;
- pe_free_action(action);
- return result;
-}
-
-/*!
- * \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
@@ -4060,53 +4303,53 @@ update_resource_state(struct action_history *history, int exit_status,
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pe_rsc_is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STATUS,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pe__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
- if (history->rsc->role < RSC_ROLE_STARTED) {
+ if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_START, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STOP, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_PROMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_DEMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
- if (*on_fail == action_fail_demote) {
+ if (*on_fail == pcmk_on_fail_demote) {
// Demote clears an error only if on-fail=demote
clear_past_failure = true;
}
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATED,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pe__node_name(history->node));
set_active(history->rsc);
@@ -4117,26 +4360,26 @@ update_resource_state(struct action_history *history, int exit_status,
}
switch (*on_fail) {
- case action_fail_stop:
- case action_fail_fence:
- case action_fail_migrate:
- case action_fail_standby:
+ case pcmk_on_fail_stop:
+ case pcmk_on_fail_ban:
+ case pcmk_on_fail_standby_node:
+ case pcmk_on_fail_fence_node:
pe_rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, fail2text(*on_fail), history->task);
break;
- case action_fail_block:
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_restart_container:
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ case pcmk_on_fail_block:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_restart_container:
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
@@ -4144,8 +4387,8 @@ update_resource_state(struct action_history *history, int exit_status,
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
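
The first half of update_resource_state() maps a successful task to the role it implies (start and migrate_from mean started, stop means stopped, promote and demote set the promoted or unpromoted role, monitors leave the role alone), and the switch on *on_fail then decides whether the result clears earlier failures. The task-to-role step, modelled as a standalone sketch with placeholder enum values:

/* Standalone sketch of the "successful task implies a role" mapping; the
 * task strings match CIB operation names, the role enum is a placeholder. */
#include <stdio.h>
#include <string.h>

enum role { ROLE_UNKNOWN, ROLE_STOPPED, ROLE_STARTED,
            ROLE_UNPROMOTED, ROLE_PROMOTED };

static enum role
role_after_success(const char *task, enum role current)
{
    if (strcmp(task, "start") == 0)        return ROLE_STARTED;
    if (strcmp(task, "stop") == 0)         return ROLE_STOPPED;
    if (strcmp(task, "promote") == 0)      return ROLE_PROMOTED;
    if (strcmp(task, "demote") == 0)       return ROLE_UNPROMOTED;
    if (strcmp(task, "migrate_from") == 0) return ROLE_STARTED;
    return current;    /* monitors and other actions leave the role alone */
}

int main(void)
{
    printf("%d\n", role_after_success("promote", ROLE_STARTED)
                   == ROLE_PROMOTED);                       /* prints 1 */
    return 0;
}
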
@@ -4170,14 +4413,14 @@ can_affect_state(struct action_history *history)
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
- return pcmk__str_any_of(history->task, CRMD_ACTION_STATUS,
- CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
+ return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
- return !pcmk__str_any_of(history->task, CRMD_ACTION_NOTIFY,
- CRMD_ACTION_METADATA, NULL);
+ return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_META_DATA, NULL);
#endif
}
@@ -4244,8 +4487,8 @@ process_expired_result(struct action_history *history, int orig_exit_status)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
- if (history->rsc->role <= RSC_ROLE_STOPPED) {
- history->rsc->role = RSC_ROLE_UNKNOWN;
+ if (history->rsc->role <= pcmk_role_stopped) {
+ history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
@@ -4303,9 +4546,9 @@ mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
- pe_resource_t *ban_rsc = history->rsc;
+ pcmk_resource_t *ban_rsc = history->rsc;
- if (!pcmk_is_set(history->rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
@@ -4392,20 +4635,20 @@ process_pending_action(struct action_history *history,
return;
}
- if (strcmp(history->task, CRMD_ACTION_START) == 0) {
- pe__set_resource_flags(history->rsc, pe_rsc_start_pending);
+ if (strcmp(history->task, PCMK_ACTION_START) == 0) {
+ pe__set_resource_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if ((strcmp(history->task, CRMD_ACTION_MIGRATE) == 0)
+ } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
/* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
- pe_node_t *target = NULL;
+ pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
XML_LRM_ATTR_MIGRATE_TARGET);
@@ -4439,13 +4682,14 @@ process_pending_action(struct action_history *history,
}
static void
-unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
- pe_resource_t *parent = rsc;
- enum action_fail_response failure_strategy = action_fail_recover;
+ pcmk_resource_t *parent = rsc;
+ enum rsc_role_e fail_role = pcmk_role_unknown;
+ enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
@@ -4514,7 +4758,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
@@ -4529,25 +4773,29 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
case PCMK_EXEC_NOT_INSTALLED:
- failure_strategy = get_action_on_fail(&history);
- if (failure_strategy == action_fail_ignore) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pe__node_name(node),
history.execution_status, history.exit_status,
history.id);
- /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */
- *on_fail = action_fail_migrate;
+ /* Also for printing it as "FAILED" by marking it as
+ * pcmk_rsc_failed later
+ */
+ *on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -INFINITY, "hard-error",
rsc->cluster);
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pe__is_guest_or_remote_node(node)
- && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
@@ -4555,7 +4803,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
* that, ensure the remote connection is considered failed.
*/
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
@@ -4571,10 +4819,10 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
break;
}
- failure_strategy = get_action_on_fail(&history);
- if ((failure_strategy == action_fail_ignore)
- || (failure_strategy == action_fail_restart_container
- && (strcmp(history.task, CRMD_ACTION_STOP) == 0))) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if ((failure_strategy == pcmk_on_fail_ignore)
+ || ((failure_strategy == pcmk_on_fail_restart_container)
+ && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
@@ -4589,17 +4837,18 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
- pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
+ pe__set_resource_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
- if ((failure_strategy == action_fail_restart_container)
- && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
+ if ((failure_strategy == pcmk_on_fail_restart_container)
+ && cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
@@ -4635,15 +4884,15 @@ done:
}
static void
-add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
- pe_working_set_t *data_set)
+add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
+ pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -4654,8 +4903,8 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
strdup(node->details->id));
- if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
- data_set->dc_node = node;
+ if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
+ scheduler->dc_node = node;
node->details->is_dc = TRUE;
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
@@ -4664,18 +4913,19 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
}
- cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
+ cluster_name = g_hash_table_lookup(scheduler->config_hash, "cluster-name");
if (cluster_name) {
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
strdup(cluster_name));
}
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
- node->details->attrs, NULL, overwrite, data_set);
+ node->details->attrs, NULL, overwrite,
+ scheduler);
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
node->details->utilization, NULL,
- FALSE, data_set);
+ FALSE, scheduler);
if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
const char *site_name = pe_node_attribute_raw(node, "site-name");
@@ -4760,15 +5010,15 @@ extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gbool
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
- pe_working_set_t * data_set)
+ pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
- xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
+ xmlNode *status = find_xml_node(scheduler->input, XML_CIB_TAG_STATUS, TRUE);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
@@ -4782,20 +5032,20 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
continue;
}
- this_node = pe_find_node(data_set->nodes, uname);
+ this_node = pe_find_node(scheduler->nodes, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
} else {
- determine_online_status(node_state, this_node, data_set);
+ determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
- || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
/* offline nodes run no resources...
* unless stonith is enabled in which case we need to
* make sure rsc start events happen after the stonith
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index ef0a092..4055d6d 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -27,40 +27,40 @@ gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
* \internal
* \brief Check whether we can fence a particular node
*
- * \param[in] data_set Working set for cluster
- * \param[in] node Name of node to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
-pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
+pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
- pe_node_t *container_node = n->data;
+ pcmk_node_t *container_node = n->data;
if (!container_node->details->online
- && !pe_can_fence(data_set, container_node)) {
+ && !pe_can_fence(scheduler, container_node)) {
return false;
}
}
return true;
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
return false; /* Turned off */
- } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
return false; /* No devices */
- } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
return true;
- } else if (data_set->no_quorum_policy == no_quorum_ignore) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
return true;
} else if(node == NULL) {
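As context for the hunk above: pe_can_fence() now takes the renamed pcmk_scheduler_t and pcmk_node_t types and walks the same policy chain (guest container host fenceable, fencing enabled, a fence device present, quorum or no-quorum-policy=ignore). A minimal caller sketch, not part of the patch; the helper name is hypothetical and assumes the library's internal headers:

/* Hypothetical helper, for illustration only */
static bool
sketch_can_fence_or_log(const pcmk_scheduler_t *scheduler,
                        const pcmk_node_t *node)
{
    if (!pe_can_fence(scheduler, node)) {
        /* Covers: fencing disabled, no fence devices, or no quorum with
         * a no-quorum-policy other than "ignore" */
        crm_trace("Cannot fence %s in the current cluster state",
                  node->details->uname);
        return false;
    }
    return true;
}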
@@ -85,65 +85,25 @@ pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
-pe_node_t *
-pe__copy_node(const pe_node_t *this_node)
+pcmk_node_t *
+pe__copy_node(const pcmk_node_t *this_node)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused
+ new_node->count = this_node->count;
new_node->details = this_node->details;
return new_node;
}
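Not part of the patch: a short sketch of the shallow-copy semantics above, where the wrapper fields (weight, count, rsc_discover_mode) are duplicated but details stays shared with the original, so only the wrapper may be freed. Variable names are illustrative.

pcmk_node_t *copy = pe__copy_node(original);

copy->weight = -INFINITY;                        /* affects only this copy */
CRM_ASSERT(copy->details == original->details);  /* node details are shared */
free(copy);                                      /* free the wrapper, never ->details */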
-/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
-void
-node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores)
-{
- GHashTable *result = hash;
- pe_node_t *other_node = NULL;
- GList *gIter = list;
-
- GHashTableIter iter;
- pe_node_t *node = NULL;
-
- g_hash_table_iter_init(&iter, hash);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
-
- other_node = pe_find_node_id(list, node->details->id);
- if (other_node == NULL) {
- node->weight = -INFINITY;
- crm_trace("Banning dependent from %s (no primary instance)",
- pe__node_name(node));
- } else if (merge_scores) {
- node->weight = pcmk__add_scores(node->weight, other_node->weight);
- crm_trace("Added primary's score %s to dependent's score for %s "
- "(now %s)", pcmk_readable_score(other_node->weight),
- pe__node_name(node), pcmk_readable_score(node->weight));
- }
- }
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- other_node = pe_hash_table_lookup(result, node->details->id);
-
- if (other_node == NULL) {
- pe_node_t *new_node = pe__copy_node(node);
-
- new_node->weight = -INFINITY;
- g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
- }
- }
-}
-
/*!
* \internal
* \brief Create a node hash table from a node list
@@ -159,8 +119,9 @@ pe__node_list2table(const GList *list)
result = pcmk__strkey_table(NULL, free);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- pe_node_t *new_node = pe__copy_node((const pe_node_t *) gIter->data);
+ pcmk_node_t *new_node = NULL;
+ new_node = pe__copy_node((const pcmk_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
@@ -184,8 +145,8 @@ pe__node_list2table(const GList *list)
gint
pe__cmp_node_name(gconstpointer a, gconstpointer b)
{
- const pe_node_t *node1 = (const pe_node_t *) a;
- const pe_node_t *node2 = (const pe_node_t *) b;
+ const pcmk_node_t *node1 = (const pcmk_node_t *) a;
+ const pcmk_node_t *node2 = (const pcmk_node_t *) b;
if ((node1 == NULL) && (node2 == NULL)) {
return 0;
@@ -207,23 +168,23 @@ pe__cmp_node_name(gconstpointer a, gconstpointer b)
* \internal
* \brief Output node weights to stdout
*
- * \param[in] rsc Use allowed nodes for this resource
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes If rsc is not specified, use these nodes
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc Use allowed nodes for this resource
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes If rsc is not specified, use these nodes
+ * \param[in,out] scheduler Scheduler data
*/
static void
-pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
- GHashTable *nodes, pe_working_set_t *data_set)
+pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment,
+ GHashTable *nodes, pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes),
pe__cmp_node_name);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- const pe_node_t *node = (const pe_node_t *) gIter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) gIter->data;
out->message(out, "node-weight", rsc, comment, node->details->uname,
pcmk_readable_score(node->weight));
@@ -244,11 +205,11 @@ pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
- const pe_resource_t *rsc, const char *comment,
+ const pcmk_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
// Don't waste time if we're not tracing at this point
pcmk__if_tracing({}, return);
@@ -275,23 +236,23 @@ pe__log_node_weights(const char *file, const char *function, int line,
* \internal
* \brief Log or output node weights
*
- * \param[in] file Caller's filename
- * \param[in] function Caller's function name
- * \param[in] line Caller's line number
- * \param[in] to_log Log if true, otherwise output
- * \param[in] rsc If not NULL, use this resource's ID in logs,
- * and show scores recursively for any children
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes Nodes whose scores should be shown
- * \param[in,out] data_set Cluster working set
+ * \param[in] file Caller's filename
+ * \param[in] function Caller's function name
+ * \param[in] line Caller's line number
+ * \param[in] to_log Log if true, otherwise output
+ * \param[in] rsc If not NULL, use this resource's ID in logs,
+ * and show scores recursively for any children
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose scores should be shown
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__show_node_weights_as(const char *file, const char *function, int line,
- bool to_log, const pe_resource_t *rsc,
- const char *comment, GHashTable *nodes,
- pe_working_set_t *data_set)
+pe__show_node_scores_as(const char *file, const char *function, int line,
+ bool to_log, const pcmk_resource_t *rsc,
+ const char *comment, GHashTable *nodes,
+ pcmk_scheduler_t *scheduler)
{
- if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
// Don't show allocation scores for orphans
return;
}
@@ -303,16 +264,16 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
- pe__output_node_weights(rsc, comment, nodes, data_set);
+ pe__output_node_weights(rsc, comment, nodes, scheduler);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
- pe__show_node_weights_as(file, function, line, to_log, child,
- comment, child->allowed_nodes, data_set);
+ pe__show_node_scores_as(file, function, line, to_log, child,
+ comment, child->allowed_nodes, scheduler);
}
}
}
@@ -334,8 +295,8 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
gint
pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *resource1 = (const pe_resource_t *)a;
- const pe_resource_t *resource2 = (const pe_resource_t *)b;
+ const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
+ const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
@@ -359,12 +320,13 @@ pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
}
static void
-resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
+resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
- if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
+ if ((rsc->exclusive_discover
+ || (node->rsc_discover_mode == pcmk_probe_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
/* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
@@ -376,13 +338,13 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
- match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
+ match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
@@ -395,24 +357,24 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
}
void
-resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
- const char *tag, pe_working_set_t *data_set)
+resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
+ const char *tag, pcmk_scheduler_t *scheduler)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
- } else if (data_set != NULL) {
- GList *gIter = data_set->nodes;
+ } else if (scheduler != NULL) {
+ GList *gIter = scheduler->nodes;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node_iter = (pe_node_t *) gIter->data;
+ pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
- pe_node_t *node_iter = NULL;
+ pcmk_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
@@ -431,14 +393,14 @@ resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
}
time_t
-get_effective_time(pe_working_set_t * data_set)
+get_effective_time(pcmk_scheduler_t *scheduler)
{
- if(data_set) {
- if (data_set->now == NULL) {
+ if(scheduler) {
+ if (scheduler->now == NULL) {
crm_trace("Recording a new 'now'");
- data_set->now = crm_time_new(NULL);
+ scheduler->now = crm_time_new(NULL);
}
- return crm_time_get_seconds_since_epoch(data_set->now);
+ return crm_time_get_seconds_since_epoch(scheduler->now);
}
crm_trace("Defaulting to 'now'");
@@ -446,9 +408,9 @@ get_effective_time(pe_working_set_t * data_set)
}
gboolean
-get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
+get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role)
{
- enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e local_role = pcmk_role_unknown;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
@@ -459,15 +421,15 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
local_role = text2role(value);
- if (local_role == RSC_ROLE_UNKNOWN) {
+ if (local_role == pcmk_role_unknown) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
- } else if (local_role > RSC_ROLE_STARTED) {
+ } else if (local_role > pcmk_role_started) {
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
- if (local_role > RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)) {
+ if (local_role > pcmk_role_unpromoted) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
@@ -485,13 +447,14 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
gboolean
-order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
+order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
+ uint32_t flags)
{
GList *gIter = NULL;
- pe_action_wrapper_t *wrapper = NULL;
+ pcmk__related_action_t *wrapper = NULL;
GList *list = NULL;
- if (order == pe_order_none) {
+ if (flags == pcmk__ar_none) {
return FALSE;
}
@@ -508,23 +471,23 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
+ pcmk__related_action_t *after = gIter->data;
- if (after->action == rh_action && (after->type & order)) {
+ if (after->action == rh_action && (after->type & flags)) {
return FALSE;
}
}
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = rh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = lh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
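Illustrative sketch (not from the patch) of the effect of the ordering code above: one pcmk__related_action_t wrapper is prepended to each side, so the relation can be traversed from either action, and duplicate (action, flags) pairs are filtered first. The action variables are hypothetical.

/* After this call:
 *   first->actions_after gains a wrapper pointing at then
 *   then->actions_before gains a wrapper pointing at first
 */
if (order_actions(first, then, pcmk__ar_ordered)) {
    crm_trace("Recorded new ordering between the two actions");
}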
@@ -534,7 +497,7 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
void
destroy_ticket(gpointer data)
{
- pe_ticket_t *ticket = data;
+ pcmk_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
@@ -543,23 +506,23 @@ destroy_ticket(gpointer data)
free(ticket);
}
-pe_ticket_t *
-ticket_new(const char *ticket_id, pe_working_set_t * data_set)
+pcmk_ticket_t *
+ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
{
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = calloc(1, sizeof(pe_ticket_t));
+ ticket = calloc(1, sizeof(pcmk_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
@@ -573,55 +536,57 @@ ticket_new(const char *ticket_id, pe_working_set_t * data_set)
ticket->standby = FALSE;
ticket->state = pcmk__strkey_table(free, free);
- g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
+ g_hash_table_insert(scheduler->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
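A usage sketch, not part of the patch: ticket_new() lazily creates scheduler->tickets on first use and returns the existing entry on later calls; the table owns each ticket via destroy_ticket(), so callers never free the result. The ticket ID is made up.

pcmk_ticket_t *ticket = ticket_new("ticketA", scheduler);

if (ticket != NULL) {
    ticket->standby = FALSE;   /* owned by scheduler->tickets; do not free */
}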
const char *
-rsc_printable_id(const pe_resource_t *rsc)
+rsc_printable_id(const pcmk_resource_t *rsc)
{
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? rsc->id : ID(rsc->xml);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? rsc->id : ID(rsc->xml);
}
void
-pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__clear_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
+pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
{
- for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *r = (pe_resource_t *) lpc->data;
+ for (GList *lpc = scheduler->resources; lpc != NULL; lpc = lpc->next) {
+ pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;
pe__clear_resource_flags_recursive(r, flag);
}
}
void
-pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__set_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
- pe_action_t *dependency, pe_working_set_t *data_set)
+trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
+ pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
- && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
@@ -629,10 +594,11 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
- pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
+ reason, FALSE, scheduler);
if(dependency) {
- order_actions(unfence, dependency, pe_order_optional);
+ order_actions(unfence, dependency, pcmk__ar_ordered);
}
} else if(rsc) {
@@ -641,7 +607,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
- trigger_unfencing(rsc, node, reason, dependency, data_set);
+ trigger_unfencing(rsc, node, reason, dependency, scheduler);
}
}
}
@@ -650,7 +616,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
- pe_tag_t *tag = NULL;
+ pcmk_tag_t *tag = NULL;
GList *gIter = NULL;
gboolean is_existing = FALSE;
@@ -658,7 +624,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
- tag = calloc(1, sizeof(pe_tag_t));
+ tag = calloc(1, sizeof(pcmk_tag_t));
if (tag == NULL) {
return FALSE;
}
@@ -697,7 +663,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
-pe__shutdown_requested(const pe_node_t *node)
+pe__shutdown_requested(const pcmk_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
@@ -706,18 +672,22 @@ pe__shutdown_requested(const pe_node_t *node)
/*!
* \internal
- * \brief Update a data set's "recheck by" time
+ * \brief Update "recheck by" time in scheduler data
*
- * \param[in] recheck Epoch time when recheck should happen
- * \param[in,out] data_set Current working set
+ * \param[in] recheck Epoch time when recheck should happen
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] reason What time is being updated for (for logs)
*/
void
-pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
+pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
+ const char *reason)
{
- if ((recheck > get_effective_time(data_set))
- && ((data_set->recheck_by == 0)
- || (data_set->recheck_by > recheck))) {
- data_set->recheck_by = recheck;
+ if ((recheck > get_effective_time(scheduler))
+ && ((scheduler->recheck_by == 0)
+ || (scheduler->recheck_by > recheck))) {
+ scheduler->recheck_by = recheck;
+ crm_debug("Updated next scheduler recheck to %s for %s",
+ pcmk__trim(ctime(&recheck)), reason);
}
}
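An illustrative call sequence, not in the patch, showing that recheck_by only ever moves to the earliest future time; the offsets and reason strings are made up.

time_t now = get_effective_time(scheduler);

pe__update_recheck_time(now + 600, scheduler, "example timeout");   /* recheck_by = now+600 */
pe__update_recheck_time(now + 300, scheduler, "rule evaluation");   /* earlier future time wins */
pe__update_recheck_time(now + 900, scheduler, "another example");   /* later: no change */
pe__update_recheck_time(now - 10,  scheduler, "stale time");        /* in the past: ignored */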
@@ -731,28 +701,28 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
- * \param[in,out] data_set Cluster working set containing \p xml_obj
+ * \param[in,out] scheduler Scheduler data containing \p xml_obj
*/
void
pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
- gboolean overwrite, pe_working_set_t *data_set)
+ gboolean overwrite, pcmk_scheduler_t *scheduler)
{
crm_time_t *next_change = crm_time_new_undefined();
- pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
+ pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(recheck, data_set);
+ pe__update_recheck_time(recheck, scheduler, "rule evaluation");
}
crm_time_free(next_change);
}
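Sketch only (not from the patch): unpacking a node's instance attributes with a rule-evaluation context, mirroring add_node_attrs() earlier in this diff; any rule that expires later feeds pe__update_recheck_time() with the "rule evaluation" reason. Variable names follow the surrounding code, but the snippet itself is illustrative.

pe_rule_eval_data_t rule_data = {
    .node_hash = NULL,
    .role = pcmk_role_unknown,
    .now = scheduler->now,
    .match_data = NULL,
    .rsc_data = NULL,
    .op_data = NULL
};

pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
                           node->details->attrs, NULL, FALSE, scheduler);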
bool
-pe__resource_is_disabled(const pe_resource_t *rsc)
+pe__resource_is_disabled(const pcmk_resource_t *rsc)
{
const char *target_role = NULL;
@@ -761,10 +731,10 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
- if ((target_role_e == RSC_ROLE_STOPPED)
- || ((target_role_e == RSC_ROLE_UNPROMOTED)
+ if ((target_role_e == pcmk_role_stopped)
+ || ((target_role_e == pcmk_role_unpromoted)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable))) {
+ pcmk_rsc_promotable))) {
return true;
}
}
@@ -781,17 +751,17 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
* \return true if \p rsc is running only on \p node, otherwise false
*/
bool
-pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node)
+pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return (rsc != NULL) && pcmk__list_of_1(rsc->running_on)
- && pe__same_node((const pe_node_t *) rsc->running_on->data, node);
+ && pe__same_node((const pcmk_node_t *) rsc->running_on->data, node);
}
bool
-pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
+pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
{
for (GList *ele = rsc->running_on; ele; ele = ele->next) {
- pe_node_t *node = (pe_node_t *) ele->data;
+ pcmk_node_t *node = (pcmk_node_t *) ele->data;
if (pcmk__str_in_list(node->details->uname, node_list,
pcmk__str_star_matches|pcmk__str_casei)) {
return true;
@@ -802,7 +772,7 @@ pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
}
bool
-pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node)
+pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node));
}
@@ -813,7 +783,7 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
GList *retval = NULL;
for (GList *gIter = rscs; gIter; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
@@ -828,7 +798,8 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
}
GList *
-pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
+pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *nodes = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
@@ -838,7 +809,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
*/
nodes = g_list_prepend(nodes, strdup("*"));
} else {
- pe_node_t *node = pe_find_node(data_set->nodes, s);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
@@ -852,7 +823,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
- nodes = pe__unames_with_tag(data_set, s);
+ nodes = pe__unames_with_tag(scheduler, s);
}
}
@@ -860,14 +831,16 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
}
GList *
-pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
+pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ s, flags);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
@@ -885,7 +858,7 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
* typo or something. See pe__build_node_name_list() for more
* detail.
*/
- resources = pe__rscs_with_tag(data_set, s);
+ resources = pe__rscs_with_tag(scheduler, s);
}
}
@@ -893,12 +866,12 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
}
xmlNode *
-pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name)
+pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
{
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
const char *rsc_id = rsc->id;
- if (parent->variant == pe_clone) {
+ if (parent->variant == pcmk_rsc_variant_clone) {
rsc_id = pe__clone_child_id(parent);
}
diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
deleted file mode 100644
index daa3781..0000000
--- a/lib/pengine/variant.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2004-2022 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#ifndef PE_VARIANT__H
-# define PE_VARIANT__H
-
-# if PE__VARIANT_BUNDLE
-
-typedef struct {
- int offset;
- char *ipaddr;
- pe_node_t *node;
- pe_resource_t *ip;
- pe_resource_t *child;
- pe_resource_t *container;
- pe_resource_t *remote;
-} pe__bundle_replica_t;
-
-enum pe__bundle_mount_flags {
- pe__bundle_mount_none = 0x00,
-
- // mount instance-specific subdirectory rather than source directly
- pe__bundle_mount_subdir = 0x01
-};
-
-typedef struct {
- char *source;
- char *target;
- char *options;
- uint32_t flags; // bitmask of pe__bundle_mount_flags
-} pe__bundle_mount_t;
-
-typedef struct {
- char *source;
- char *target;
-} pe__bundle_port_t;
-
-enum pe__container_agent {
- PE__CONTAINER_AGENT_UNKNOWN,
- PE__CONTAINER_AGENT_DOCKER,
- PE__CONTAINER_AGENT_RKT,
- PE__CONTAINER_AGENT_PODMAN,
-};
-
-#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
-#define PE__CONTAINER_AGENT_DOCKER_S "docker"
-#define PE__CONTAINER_AGENT_RKT_S "rkt"
-#define PE__CONTAINER_AGENT_PODMAN_S "podman"
-
-typedef struct pe__bundle_variant_data_s {
- int promoted_max;
- int nreplicas;
- int nreplicas_per_host;
- char *prefix;
- char *image;
- const char *ip_last;
- char *host_network;
- char *host_netmask;
- char *control_port;
- char *container_network;
- char *ip_range_start;
- gboolean add_host;
- gchar *container_host_options;
- char *container_command;
- char *launcher_options;
- const char *attribute_target;
-
- pe_resource_t *child;
-
- GList *replicas; // pe__bundle_replica_t *
- GList *ports; // pe__bundle_port_t *
- GList *mounts; // pe__bundle_mount_t *
-
- enum pe__container_agent agent_type;
-} pe__bundle_variant_data_t;
-
-# define get_bundle_variant_data(data, rsc) \
- CRM_ASSERT(rsc != NULL); \
- CRM_ASSERT(rsc->variant == pe_container); \
- CRM_ASSERT(rsc->variant_opaque != NULL); \
- data = (pe__bundle_variant_data_t *)rsc->variant_opaque; \
-
-# endif
-
-#endif
diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am
index a7e10c9..5a19003 100644
--- a/lib/services/Makefile.am
+++ b/lib/services/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2021 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,19 +12,18 @@ MAINTAINERCLEANFILES = Makefile.in
AM_CPPFLAGS = -I$(top_srcdir)/include
lib_LTLIBRARIES = libcrmservice.la
-noinst_HEADERS = pcmk-dbus.h upstart.h systemd.h \
- services_lsb.h services_nagios.h \
- services_ocf.h \
- services_private.h
+noinst_HEADERS = $(wildcard *.h)
-libcrmservice_la_LDFLAGS = -version-info 31:2:3
+libcrmservice_la_LDFLAGS = -version-info 32:0:4
libcrmservice_la_CFLAGS =
libcrmservice_la_CFLAGS += $(CFLAGS_HARDENED_LIB)
libcrmservice_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmservice_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(DBUS_LIBS)
+libcrmservice_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \
+ $(DBUS_LIBS)
+## Library sources (*must* use += format for bumplibs)
libcrmservice_la_SOURCES = services.c
libcrmservice_la_SOURCES += services_linux.c
libcrmservice_la_SOURCES += services_lsb.c
diff --git a/lib/services/dbus.c b/lib/services/dbus.c
index f052c0a..8a517d2 100644
--- a/lib/services/dbus.c
+++ b/lib/services/dbus.c
@@ -594,6 +594,8 @@ handle_query_result(DBusMessage *reply, struct property_query *data)
DBusMessageIter variant_iter;
DBusBasicValue value;
+ dbus_error_init(&error);
+
// First, check if the reply contains an error
if (pcmk_dbus_find_error((void*)&error, reply, &error)) {
crm_err("DBus query for %s property '%s' failed: %s",
diff --git a/lib/services/services.c b/lib/services/services.c
index b60d8bd..e438443 100644
--- a/lib/services/services.c
+++ b/lib/services/services.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -233,8 +233,8 @@ copy_action_arguments(svc_action_t *op, uint32_t ra_caps, const char *name,
}
if (pcmk_is_set(ra_caps, pcmk_ra_cap_status)
- && pcmk__str_eq(action, "monitor", pcmk__str_casei)) {
- action = "status";
+ && pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
+ action = PCMK_ACTION_STATUS;
}
op->action = strdup(action);
if (op->action == NULL) {
@@ -1028,7 +1028,7 @@ services_action_sync(svc_action_t * op)
op->synchronous = true;
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
/* Synchronous meta-data operations are handled specially. Since most
* resource classes don't provide any meta-data, it has to be
* synthesized from available information about the agent.
diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c
index fb12f73..c7792f0 100644
--- a/lib/services/services_linux.c
+++ b/lib/services/services_linux.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -725,7 +725,7 @@ services__generic_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_UNKNOWN;
}
@@ -760,7 +760,7 @@ services__not_installed_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_NOT_INSTALLED;
}
@@ -795,7 +795,7 @@ services__authorization_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_INSUFFICIENT_PRIV;
}
@@ -831,7 +831,7 @@ services__configuration_error(const svc_action_t *op, bool is_fatal)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_NOT_CONFIGURED;
}
@@ -954,7 +954,7 @@ action_launch_child(svc_action_t *op)
#if SUPPORT_CIBSECRETS
rc = pcmk__substitute_secrets(op->rsc, op->params);
if (rc != pcmk_rc_ok) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_info("Proceeding with stop operation for %s "
"despite being unable to load CIB secrets (%s)",
op->rsc, pcmk_rc_str(rc));
@@ -1178,7 +1178,7 @@ services__execute_file(svc_action_t *op)
if (stat(op->opaque->exec, &st) != 0) {
rc = errno;
crm_info("Cannot execute '%s': %s " CRM_XS " stat rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1186,7 +1186,7 @@ services__execute_file(svc_action_t *op)
if (pipe(stdout_fd) < 0) {
rc = errno;
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stdout) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1197,7 +1197,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stdout_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stderr) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1210,7 +1210,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stderr_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stdin) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1235,7 +1235,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stderr_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " fork rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
if (op->synchronous) {
sigchld_cleanup(&data);
diff --git a/lib/services/services_lsb.c b/lib/services/services_lsb.c
index 134cc70..9ad7025 100644
--- a/lib/services/services_lsb.c
+++ b/lib/services/services_lsb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -290,7 +290,8 @@ enum ocf_exitcode
services__lsb2ocf(const char *action, int exit_status)
{
// For non-status actions, LSB and OCF share error codes <= 7
- if (!pcmk__str_any_of(action, "status", "monitor", NULL)) {
+ if (!pcmk__str_any_of(action, PCMK_ACTION_STATUS, PCMK_ACTION_MONITOR,
+ NULL)) {
if ((exit_status < 0) || (exit_status > PCMK_LSB_NOT_RUNNING)) {
return PCMK_OCF_UNKNOWN_ERROR;
}
diff --git a/lib/services/services_nagios.c b/lib/services/services_nagios.c
index abddca8..10759b5 100644
--- a/lib/services/services_nagios.c
+++ b/lib/services/services_nagios.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -49,7 +49,7 @@ services__nagios_prepare(svc_action_t *op)
return ENOMEM;
}
- if (pcmk__str_eq(op->action, "monitor", pcmk__str_casei)
+ if (pcmk__str_eq(op->action, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& (op->interval_ms == 0)) {
// Invoke --version for a nagios probe
diff --git a/lib/services/systemd.c b/lib/services/systemd.c
index 0c38ae0..ecac86c 100644
--- a/lib/services/systemd.c
+++ b/lib/services/systemd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 the Pacemaker project contributors
+ * Copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -308,7 +308,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
|| strstr(error->name, "org.freedesktop.systemd1.LoadFailed")
|| strstr(error->name, "org.freedesktop.systemd1.NoSuchUnit")) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_trace("Masking systemd stop failure (%s) for %s "
"because unknown service can be considered stopped",
error->name, pcmk__s(op->rsc, "unknown resource"));
@@ -459,7 +459,11 @@ invoke_unit_by_name(const char *arg_name, svc_action_t *op, char **path)
CRM_ASSERT(msg != NULL);
// Add the (expanded) unit name as the argument
- name = systemd_service_name(arg_name, op == NULL || pcmk__str_eq(op->action, "meta-data", pcmk__str_none));
+ name = systemd_service_name(arg_name,
+ (op == NULL)
+ || pcmk__str_eq(op->action,
+ PCMK_ACTION_META_DATA,
+ pcmk__str_none));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name,
DBUS_TYPE_INVALID));
free(name);
@@ -717,6 +721,8 @@ process_unit_method_reply(DBusMessage *reply, svc_action_t *op)
{
DBusError error;
+ dbus_error_init(&error);
+
/* Here, error serves only as a non-NULL flag to
 * indicate that a request was indeed sent
*/
@@ -932,7 +938,8 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
DBusMessage *msg = NULL;
DBusMessage *reply = NULL;
- if (pcmk__str_any_of(op->action, "monitor", "status", NULL)) {
+ if (pcmk__str_any_of(op->action, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS,
+ NULL)) {
DBusPendingCall *pending = NULL;
char *state;
@@ -955,11 +962,11 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
}
return;
- } else if (pcmk__str_eq(op->action, "start", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_START, pcmk__str_none)) {
method = "StartUnit";
systemd_create_override(op->agent, op->timeout);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
method = "StopUnit";
systemd_remove_override(op->agent, op->timeout);
@@ -988,7 +995,10 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
/* (ss) */
{
const char *replace_s = "replace";
- char *name = systemd_service_name(op->agent, pcmk__str_eq(op->action, "meta-data", pcmk__str_none));
+ char *name = systemd_service_name(op->agent,
+ pcmk__str_eq(op->action,
+ PCMK_ACTION_META_DATA,
+ pcmk__str_none));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name, DBUS_TYPE_INVALID));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &replace_s, DBUS_TYPE_INVALID));
@@ -1072,7 +1082,7 @@ services__execute_systemd(svc_action_t *op)
(op->synchronous? "" : "a"), op->action, op->agent,
((op->rsc == NULL)? "" : " for resource "), pcmk__s(op->rsc, ""));
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
op->stdout_data = systemd_unit_metadata(op->agent, op->timeout);
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
goto done;
diff --git a/lib/services/upstart.c b/lib/services/upstart.c
index 459b572..2306e73 100644
--- a/lib/services/upstart.c
+++ b/lib/services/upstart.c
@@ -1,7 +1,7 @@
/*
* Original copyright 2010 Senko Rasic <senko.rasic@dobarkod.hr>
* and Ante Karamatic <ivoks@init.hr>
- * Later changes copyright 2012-2022 the Pacemaker project contributors
+ * Later changes copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -412,7 +412,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
if (strstr(error->name, UPSTART_06_API ".Error.UnknownInstance")) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_trace("Masking stop failure (%s) for %s "
"because unknown service can be considered stopped",
error->name, pcmk__s(op->rsc, "unknown resource"));
@@ -423,7 +423,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
services__set_result(op, PCMK_OCF_NOT_INSTALLED,
PCMK_EXEC_NOT_INSTALLED, "Upstart job not found");
- } else if (pcmk__str_eq(op->action, "start", pcmk__str_casei)
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_START, pcmk__str_casei)
&& strstr(error->name, UPSTART_06_API ".Error.AlreadyStarted")) {
crm_trace("Masking start failure (%s) for %s "
"because already started resource is OK",
@@ -462,7 +462,7 @@ job_method_complete(DBusPendingCall *pending, void *user_data)
set_result_from_method_error(op, &error);
dbus_error_free(&error);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
// Call has no return value
crm_debug("DBus request for stop of %s succeeded",
pcmk__s(op->rsc, "unknown resource"));
@@ -539,14 +539,14 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
}
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
op->stdout_data = upstart_job_metadata(op->agent);
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
goto cleanup;
}
if (!object_path_for_job(op->agent, &job, op->timeout)) {
- if (pcmk__str_eq(action, "stop", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
} else {
services__set_result(op, PCMK_OCF_NOT_INSTALLED,
@@ -563,7 +563,8 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
}
- if (pcmk__strcase_any_of(op->action, "monitor", "status", NULL)) {
+ if (pcmk__strcase_any_of(op->action, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_STATUS, NULL)) {
DBusPendingCall *pending = NULL;
char *state = NULL;
char *path = get_first_instance(job, op->timeout);
@@ -598,10 +599,10 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
- } else if (pcmk__str_eq(action, "start", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
action = "Start";
- } else if (pcmk__str_eq(action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
action = "Stop";
} else if (pcmk__str_eq(action, "restart", pcmk__str_none)) {
@@ -665,7 +666,7 @@ services__execute_upstart(svc_action_t *op)
set_result_from_method_error(op, &error);
dbus_error_free(&error);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
// DBus call does not return a value
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);