Diffstat (limited to 'lib/pengine')
-rw-r--r--  lib/pengine/Makefile.am                                   |   52
-rw-r--r--  lib/pengine/bundle.c                                      |  415
-rw-r--r--  lib/pengine/clone.c                                       |  428
-rw-r--r--  lib/pengine/common.c                                      |  339
-rw-r--r--  lib/pengine/complex.c                                     |  338
-rw-r--r--  lib/pengine/failcounts.c                                  |  247
-rw-r--r--  lib/pengine/group.c                                       |  102
-rw-r--r--  lib/pengine/native.c                                      |  335
-rw-r--r--  lib/pengine/pe_actions.c                                  | 1303
-rw-r--r--  lib/pengine/pe_digest.c                                   |  162
-rw-r--r--  lib/pengine/pe_health.c                                   |   16
-rw-r--r--  lib/pengine/pe_notif.c                                    |  226
-rw-r--r--  lib/pengine/pe_output.c                                   |  552
-rw-r--r--  lib/pengine/pe_status_private.h                           |   83
-rw-r--r--  lib/pengine/remote.c                                      |  100
-rw-r--r--  lib/pengine/rules.c                                       |   47
-rw-r--r--  lib/pengine/rules_alerts.c                                |   13
-rw-r--r--  lib/pengine/status.c                                      |  268
-rw-r--r--  lib/pengine/tags.c                                        |   37
-rw-r--r--  lib/pengine/tests/Makefile.am                             |   15
-rw-r--r--  lib/pengine/tests/native/Makefile.am                      |    4
-rw-r--r--  lib/pengine/tests/native/native_find_rsc_test.c           |  724
-rw-r--r--  lib/pengine/tests/native/pe_base_name_eq_test.c           |   31
-rw-r--r--  lib/pengine/tests/status/Makefile.am                      |   12
-rw-r--r--  lib/pengine/tests/status/pe_find_node_any_test.c          |    6
-rw-r--r--  lib/pengine/tests/status/pe_find_node_id_test.c           |    6
-rw-r--r--  lib/pengine/tests/status/pe_find_node_test.c              |    6
-rw-r--r--  lib/pengine/tests/status/pe_new_working_set_test.c        |   10
-rw-r--r--  lib/pengine/tests/status/set_working_set_defaults_test.c  |   27
-rw-r--r--  lib/pengine/tests/utils/Makefile.am                       |    5
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_node_name_test.c          |    6
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c       |    4
-rw-r--r--  lib/pengine/unpack.c                                      | 1794
-rw-r--r--  lib/pengine/utils.c                                       |  331
-rw-r--r--  lib/pengine/variant.h                                     |   91
35 files changed, 4649 insertions, 3486 deletions
diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
index c2a8c90..9ffc745 100644
--- a/lib/pengine/Makefile.am
+++ b/lib/pengine/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -15,27 +15,33 @@ include $(top_srcdir)/mk/common.mk
SUBDIRS = . tests
## libraries
-lib_LTLIBRARIES = libpe_rules.la libpe_status.la
-check_LTLIBRARIES = libpe_rules_test.la libpe_status_test.la
+lib_LTLIBRARIES = libpe_rules.la \
+ libpe_status.la
+check_LTLIBRARIES = libpe_rules_test.la \
+ libpe_status_test.la
-## SOURCES
-noinst_HEADERS = variant.h pe_status_private.h
+noinst_HEADERS = pe_status_private.h
-libpe_rules_la_LDFLAGS = -version-info 30:0:4
+libpe_rules_la_LDFLAGS = -version-info 30:1:4
libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-libpe_rules_la_SOURCES = rules.c rules_alerts.c common.c
-libpe_status_la_LDFLAGS = -version-info 34:0:6
+## Library sources (*must* use += format for bumplibs)
+libpe_rules_la_SOURCES = common.c
+libpe_rules_la_SOURCES += rules.c
+libpe_rules_la_SOURCES += rules_alerts.c
+
+libpe_status_la_LDFLAGS = -version-info 35:0:7
libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_status_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+
+## Library sources (*must* use += format for bumplibs)
libpe_status_la_SOURCES =
libpe_status_la_SOURCES += bundle.c
libpe_status_la_SOURCES += clone.c
@@ -64,18 +70,26 @@ libpe_status_la_SOURCES += utils.c
include $(top_srcdir)/mk/tap.mk
libpe_rules_test_la_SOURCES = $(libpe_rules_la_SOURCES)
-libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) -DPCMK__UNIT_TESTING \
+libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
-fno-builtin -fno-inline
-libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
+libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
libpe_status_test_la_SOURCES = $(libpe_status_la_SOURCES)
-libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) -DPCMK__UNIT_TESTING \
- -fno-builtin -fno-inline
-libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
-
-clean-generic:
- rm -f *.log *.debug *~
+libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
+ -fno-builtin \
+ -fno-inline
+libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
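
[The Makefile.am changes above do two things: bump the libtool version
metadata (libpe_rules 30:0:4 -> 30:1:4, libpe_status 34:0:6 -> 35:0:7) and
normalize multi-value variables to one entry per line so the project's
bumplibs tool can parse the source lists. A minimal sketch of the convention,
using a hypothetical libfoo target rather than the real libraries:

    ## Library sources (*must* use += format for bumplibs)
    libfoo_la_SOURCES =
    libfoo_la_SOURCES += alpha.c
    libfoo_la_SOURCES += beta.c

    # libtool -version-info is current:revision:age: adding
    # backward-compatible interfaces bumps current and age (34:0:6 ->
    # 35:0:7), while an implementation-only change bumps revision
    # (30:0:4 -> 30:1:4)
    libfoo_la_LDFLAGS = -version-info 35:0:7
]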
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index ff1b365..fd859d5 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -20,8 +20,69 @@
#include <crm/common/xml_internal.h>
#include <pe_status_private.h>
-#define PE__VARIANT_BUNDLE 1
-#include "./variant.h"
+enum pe__bundle_mount_flags {
+ pe__bundle_mount_none = 0x00,
+
+ // mount instance-specific subdirectory rather than source directly
+ pe__bundle_mount_subdir = 0x01
+};
+
+typedef struct {
+ char *source;
+ char *target;
+ char *options;
+ uint32_t flags; // bitmask of pe__bundle_mount_flags
+} pe__bundle_mount_t;
+
+typedef struct {
+ char *source;
+ char *target;
+} pe__bundle_port_t;
+
+enum pe__container_agent {
+ PE__CONTAINER_AGENT_UNKNOWN,
+ PE__CONTAINER_AGENT_DOCKER,
+ PE__CONTAINER_AGENT_RKT,
+ PE__CONTAINER_AGENT_PODMAN,
+};
+
+#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
+#define PE__CONTAINER_AGENT_DOCKER_S "docker"
+#define PE__CONTAINER_AGENT_RKT_S "rkt"
+#define PE__CONTAINER_AGENT_PODMAN_S "podman"
+
+typedef struct pe__bundle_variant_data_s {
+ int promoted_max;
+ int nreplicas;
+ int nreplicas_per_host;
+ char *prefix;
+ char *image;
+ const char *ip_last;
+ char *host_network;
+ char *host_netmask;
+ char *control_port;
+ char *container_network;
+ char *ip_range_start;
+ gboolean add_host;
+ gchar *container_host_options;
+ char *container_command;
+ char *launcher_options;
+ const char *attribute_target;
+
+ pcmk_resource_t *child;
+
+ GList *replicas; // pe__bundle_replica_t *
+ GList *ports; // pe__bundle_port_t *
+ GList *mounts; // pe__bundle_mount_t *
+
+ enum pe__container_agent agent_type;
+} pe__bundle_variant_data_t;
+
+#define get_bundle_variant_data(data, rsc) \
+ CRM_ASSERT(rsc != NULL); \
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle); \
+ CRM_ASSERT(rsc->variant_opaque != NULL); \
+ data = (pe__bundle_variant_data_t *) rsc->variant_opaque;
/*!
* \internal
@@ -32,7 +93,7 @@
* \return Maximum replicas for bundle corresponding to \p rsc
*/
int
-pe__bundle_max(const pe_resource_t *rsc)
+pe__bundle_max(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
@@ -42,19 +103,149 @@ pe__bundle_max(const pe_resource_t *rsc)
/*!
* \internal
- * \brief Get maximum number of bundle replicas allowed to run on one node
+ * \brief Get the resource inside a bundle
*
- * \param[in] rsc Bundle or bundled resource to check
+ * \param[in] bundle Bundle to check
*
- * \return Maximum replicas per node for bundle corresponding to \p rsc
+ * \return Resource inside \p bundle if any, otherwise NULL
*/
-int
-pe__bundle_max_per_node(const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__bundled_resource(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
- return bundle_data->nreplicas_per_host;
+ return bundle_data->child;
+}
+
+/*!
+ * \internal
+ * \brief Get containerized resource corresponding to a given bundle container
+ *
+ * \param[in] instance Collective instance that might be a bundle container
+ *
+ * \return Bundled resource instance inside \p instance if it is a bundle
+ * container instance, otherwise NULL
+ */
+const pcmk_resource_t *
+pe__get_rsc_in_container(const pcmk_resource_t *instance)
+{
+ const pe__bundle_variant_data_t *data = NULL;
+ const pcmk_resource_t *top = pe__const_top_resource(instance, true);
+
+ if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) {
+ return NULL;
+ }
+ get_bundle_variant_data(data, top);
+
+ for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
+ const pe__bundle_replica_t *replica = iter->data;
+
+ if (instance == replica->container) {
+ return replica->child;
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given node is created by a bundle
+ *
+ * \param[in] bundle Bundle resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p node is an instance of \p bundle, otherwise false
+ */
+bool
+pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
+ const pcmk_node_t *node)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+
+ if (pe__same_node(node, replica->node)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Get the container of a bundle's first replica
+ *
+ * \param[in] bundle Bundle resource to get container for
+ *
+ * \return Container resource from first replica of \p bundle if any,
+ * otherwise NULL
+ */
+pcmk_resource_t *
+pe__first_container(const pcmk_resource_t *bundle)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+ const pe__bundle_replica_t *replica = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ if (bundle_data->replicas == NULL) {
+ return NULL;
+ }
+ replica = bundle_data->replicas->data;
+ return replica->container;
+}
+
+/*!
+ * \internal
+ * \brief Iterate over bundle replicas
+ *
+ * \param[in,out] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_bundle_replica(pcmk_resource_t *bundle,
+ bool (*fn)(pe__bundle_replica_t *, void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ if (!fn((pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Iterate over const bundle replicas
+ *
+ * \param[in] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
+ bool (*fn)(const pe__bundle_replica_t *,
+ void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (const GList *iter = bundle_data->replicas; iter != NULL;
+ iter = iter->next) {
+
+ if (!fn((const pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
}
static char *
@@ -159,7 +350,8 @@ valid_network(pe__bundle_variant_data_t *data)
if(data->nreplicas_per_host > 1) {
pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
data->nreplicas_per_host = 1;
- // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
+ // @TODO to be sure:
+ // pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
return TRUE;
}
@@ -167,7 +359,7 @@ valid_network(pe__bundle_variant_data_t *data)
}
static int
-create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if(data->ip_range_start) {
@@ -198,7 +390,8 @@ create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
xml_obj = create_xml_node(xml_ip, "operations");
- crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_ip), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
@@ -226,7 +419,7 @@ container_agent_str(enum pe__container_agent t)
}
static int
-create_container_resource(pe_resource_t *parent,
+create_container_resource(pcmk_resource_t *parent,
const pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
@@ -295,11 +488,11 @@ create_container_resource(pe_resource_t *parent,
}
if (data->control_port != NULL) {
- pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
- data->control_port, NULL);
+ pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=",
+ data->control_port, NULL);
} else {
- g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
- DEFAULT_REMOTE_PORT);
+ g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d",
+ env_opt, DEFAULT_REMOTE_PORT);
}
for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
@@ -449,14 +642,15 @@ create_container_resource(pe_resource_t *parent,
}
xml_obj = create_xml_node(xml_container, "operations");
- crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_container), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (pe__unpack_resource(xml_container, &replica->container, parent,
parent->cluster) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
- pe__set_resource_flags(replica->container, pe_rsc_replica_container);
+ pe__set_resource_flags(replica->container, pcmk_rsc_replica_container);
parent->children = g_list_append(parent->children, replica->container);
return pcmk_rc_ok;
@@ -469,13 +663,13 @@ create_container_resource(pe_resource_t *parent,
* \param[in] uname Name of node to ban
*/
static void
-disallow_node(pe_resource_t *rsc, const char *uname)
+disallow_node(pcmk_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
- ((pe_node_t *) match)->weight = -INFINITY;
- ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
+ ((pcmk_node_t *) match)->weight = -INFINITY;
+ ((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never;
}
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
@@ -483,12 +677,12 @@ disallow_node(pe_resource_t *rsc, const char *uname)
}
static int
-create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
@@ -527,8 +721,8 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
- * need something that will get freed during data set cleanup to use as
- * the node ID and uname.
+ * need something that will get freed during scheduler data cleanup to
+ * use as the node ID and uname.
*/
free(id);
id = NULL;
@@ -545,12 +739,12 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
} else {
node->weight = -INFINITY;
}
- node->rsc_discover_mode = pe_discover_never;
+ node->rsc_discover_mode = pcmk_probe_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
- * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
- * Unfortunately, a bundle has to be mostly unpacked before it's obvious
- * what nodes will be needed, so we do it just above.
+ * has a pcmk_node_t entry. Ideally, it would do the same for bundle
+ * nodes. Unfortunately, a bundle has to be mostly unpacked before it's
+ * obvious what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
@@ -569,7 +763,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
replica->node = pe__copy_node(node);
replica->node->weight = 500;
- replica->node->rsc_discover_mode = pe_discover_exclusive;
+ replica->node->rsc_discover_mode = pcmk_probe_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
@@ -581,7 +775,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__copy_node(replica->node));
{
- pe_node_t *copy = pe__copy_node(replica->node);
+ pcmk_node_t *copy = pe__copy_node(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
@@ -625,7 +819,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
static int
-create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
int rc = pcmk_rc_ok;
@@ -658,7 +852,8 @@ create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
- pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
+ pe__set_resource_flags(replica->remote,
+ pcmk_rsc_remote_nesting_allowed);
}
return rc;
}
@@ -695,9 +890,9 @@ port_free(pe__bundle_port_t *port)
}
static pe__bundle_replica_t *
-replica_for_remote(pe_resource_t *remote)
+replica_for_remote(pcmk_resource_t *remote)
{
- pe_resource_t *top = remote;
+ pcmk_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
@@ -722,7 +917,7 @@ replica_for_remote(pe_resource_t *remote)
}
bool
-pe__bundle_needs_remote_name(pe_resource_t *rsc)
+pe__bundle_needs_remote_name(pcmk_resource_t *rsc)
{
const char *value;
GHashTable *params = NULL;
@@ -740,12 +935,12 @@ pe__bundle_needs_remote_name(pe_resource_t *rsc)
}
const char *
-pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
+pe__add_bundle_remote_name(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
@@ -786,7 +981,7 @@ pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
} while (0)
gboolean
-pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
+pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
@@ -819,7 +1014,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
// Use 0 for default, minimum, and invalid promoted-max
- value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
+ value = crm_element_value(xml_obj, PCMK_META_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, "masters");
@@ -842,7 +1037,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
value = crm_element_value(xml_obj, "replicas-per-host");
pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
if (bundle_data->nreplicas_per_host == 1) {
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
@@ -934,13 +1129,11 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->nreplicas);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value);
free(value);
value = pcmk__itoa(bundle_data->nreplicas_per_host);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_NODEMAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
@@ -951,8 +1144,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->promoted_max);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_PROMOTED_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value);
free(value);
}
@@ -972,7 +1164,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
GString *buffer = NULL;
if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return FALSE;
}
@@ -1033,8 +1225,8 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
- if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
- pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
+ if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) {
+ pe__set_resource_flags(bundle_data->child, pcmk_rsc_notify);
}
allocate_ip(bundle_data, replica, buffer);
@@ -1109,7 +1301,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
static int
-replica_resource_active(pe_resource_t *rsc, gboolean all)
+replica_resource_active(pcmk_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->fns->active(rsc, all);
@@ -1124,7 +1316,7 @@ replica_resource_active(pe_resource_t *rsc, gboolean all)
}
gboolean
-pe__bundle_active(pe_resource_t *rsc, gboolean all)
+pe__bundle_active(pcmk_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GList *iter = NULL;
@@ -1171,8 +1363,8 @@ pe__bundle_active(pe_resource_t *rsc, gboolean all)
*
* \return Bundle replica if found, NULL otherwise
*/
-pe_resource_t *
-pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
+pcmk_resource_t *
+pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
@@ -1195,7 +1387,7 @@ pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
* \deprecated This function will be removed in a future release
*/
static void
-print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
+print_rsc_in_list(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
if (rsc != NULL) {
@@ -1214,7 +1406,7 @@ print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
static void
-bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+bundle_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1232,9 +1424,10 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
status_print("image=\"%s\" ", bundle_data->image);
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print(">\n");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
@@ -1254,12 +1447,13 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1313,10 +1507,11 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
"id", rsc->id,
"type", container_agent_str(bundle_data->agent_type),
"image", bundle_data->image,
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
}
@@ -1358,9 +1553,9 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- pe_resource_t *rsc = replica->child;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1394,23 +1589,24 @@ pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replic
* otherwise unmanaged, or an empty string otherwise
*/
static const char *
-get_unmanaged_str(const pe_resource_t *rsc)
+get_unmanaged_str(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
return " (maintenance)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return " (unmanaged)";
}
return "";
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1460,7 +1656,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1497,7 +1693,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1512,9 +1708,9 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- const pe_resource_t *rsc = replica->child;
+ const pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1538,12 +1734,13 @@ pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replic
pe__common_output_text(out, rsc, buffer, node, show_opts);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1593,7 +1790,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1630,7 +1827,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1651,8 +1848,8 @@ static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
long options, void *print_data)
{
- pe_node_t *node = NULL;
- pe_resource_t *rsc = replica->child;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1682,7 +1879,7 @@ print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
* \deprecated This function will be removed in a future release
*/
void
-pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
+pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1703,8 +1900,8 @@ pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sContainer bundle%s: %s [%s]%s%s\n",
pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("<br />\n<ul>\n");
}
@@ -1784,7 +1981,7 @@ free_bundle_replica(pe__bundle_replica_t *replica)
}
void
-pe__free_bundle(pe_resource_t *rsc)
+pe__free_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
@@ -1818,9 +2015,9 @@ pe__free_bundle(pe_resource_t *rsc)
}
enum rsc_role_e
-pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
+pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current)
{
- enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e container_role = pcmk_role_unknown;
return container_role;
}
@@ -1832,9 +2029,9 @@ pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
* \return Number of configured replicas, or 0 on error
*/
int
-pe_bundle_replicas(const pe_resource_t *rsc)
+pe_bundle_replicas(const pcmk_resource_t *rsc)
{
- if ((rsc == NULL) || (rsc->variant != pe_container)) {
+ if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) {
return 0;
} else {
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1845,7 +2042,7 @@ pe_bundle_replicas(const pe_resource_t *rsc)
}
void
-pe__count_bundle(pe_resource_t *rsc)
+pe__count_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1869,7 +2066,7 @@ pe__count_bundle(pe_resource_t *rsc)
}
gboolean
-pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1913,7 +2110,7 @@ pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
* g_list_free().
*/
GList *
-pe__bundle_containers(const pe_resource_t *bundle)
+pe__bundle_containers(const pcmk_resource_t *bundle)
{
GList *containers = NULL;
const pe__bundle_variant_data_t *data = NULL;
@@ -1927,14 +2124,14 @@ pe__bundle_containers(const pe_resource_t *bundle)
return containers;
}
-// Bundle implementation of resource_object_functions_t:active_node()
-pe_node_t *
-pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Bundle implementation of pcmk_rsc_methods_t:active_node()
+pcmk_node_t *
+pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
- pe_node_t *node = NULL;
- pe_resource_t *container = NULL;
+ pcmk_node_t *active = NULL;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *container = NULL;
GList *containers = NULL;
GList *iter = NULL;
GHashTable *nodes = NULL;
@@ -2002,3 +2199,21 @@ done:
g_hash_table_destroy(nodes);
return active;
}
+
+/*!
+ * \internal
+ * \brief Get maximum bundle resource instances per node
+ *
+ * \param[in] rsc Bundle resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__bundle_max_per_node(const pcmk_resource_t *rsc)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
+ return (unsigned int) bundle_data->nreplicas_per_host;
+}
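
[The bundle.c changes above move the variant data (pe__bundle_variant_data_t
and related types) out of the removed variant.h into the .c file, and expose
small accessors plus callback-based replica iterators in their place. A
minimal caller sketch for the new iterator, assuming code with access to the
private pe__bundle_replica_t type; the counting callback below is
hypothetical, not part of the diff:

    // Count replicas whose container resource is fully active
    static bool
    count_active_containers(pe__bundle_replica_t *replica, void *user_data)
    {
        unsigned int *count = user_data;

        if ((replica->container != NULL)
            && replica->container->fns->active(replica->container, TRUE)) {
            (*count)++;
        }
        return true;  // returning false would stop the iteration early
    }

    ...
    unsigned int active = 0;

    pe__foreach_bundle_replica(bundle, count_active_containers, &active);
]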
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index e411f98..a92a4b7 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -18,13 +18,14 @@
#include <crm/msg_xml.h>
#include <crm/common/output.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#ifdef PCMK__COMPAT_2_0
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s"
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED_LEGACY "s"
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED_LEGACY "s"
#else
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED
#endif
typedef struct clone_variant_data_s {
@@ -36,7 +37,7 @@ typedef struct clone_variant_data_s {
int total_clones;
- uint32_t flags; // Group of enum pe__clone_flags
+ uint32_t flags; // Group of enum pcmk__clone_flags
notify_data_t *stop_notify;
notify_data_t *start_notify;
@@ -46,8 +47,8 @@ typedef struct clone_variant_data_s {
xmlNode *xml_obj_child;
} clone_variant_data_t;
-#define get_clone_variant_data(data, rsc) \
- CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_clone)); \
+#define get_clone_variant_data(data, rsc) \
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_clone)); \
data = (clone_variant_data_t *) rsc->variant_opaque;
/*!
@@ -59,7 +60,7 @@ typedef struct clone_variant_data_s {
* \return Maximum instances for \p clone
*/
int
-pe__clone_max(const pe_resource_t *clone)
+pe__clone_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -76,7 +77,7 @@ pe__clone_max(const pe_resource_t *clone)
* \return Maximum allowed instances per node for \p clone
*/
int
-pe__clone_node_max(const pe_resource_t *clone)
+pe__clone_node_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -93,7 +94,7 @@ pe__clone_node_max(const pe_resource_t *clone)
* \return Maximum promoted instances for \p clone
*/
int
-pe__clone_promoted_max(const pe_resource_t *clone)
+pe__clone_promoted_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -110,7 +111,7 @@ pe__clone_promoted_max(const pe_resource_t *clone)
* \return Maximum promoted instances for \p clone
*/
int
-pe__clone_promoted_node_max(const pe_resource_t *clone)
+pe__clone_promoted_node_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -167,16 +168,16 @@ node_list_to_str(const GList *list)
}
static void
-clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+clone_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
clone_variant_data_t *clone_data, const char *desc)
{
GString *attrs = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_separated_word(&attrs, 64, "promotable", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
pcmk__add_separated_word(&attrs, 64, "unique", ", ");
}
@@ -184,10 +185,10 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -206,8 +207,8 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
void
-pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set)
+pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
@@ -218,15 +219,15 @@ pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
- g_list_length(data_set->nodes));
+ g_list_length(scheduler->nodes));
}
}
-pe_resource_t *
-find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
+pcmk_resource_t *
+find_clone_instance(const pcmk_resource_t *rsc, const char *sub_id)
{
char *child_id = NULL;
- pe_resource_t *child = NULL;
+ pcmk_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -240,13 +241,13 @@ find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
return child;
}
-pe_resource_t *
-pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
+pcmk_resource_t *
+pe__create_clone_child(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
- pe_resource_t *child_rsc = NULL;
+ pcmk_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -268,7 +269,7 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (pe__unpack_resource(child_copy, &child_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
@@ -278,10 +279,10 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
- pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
+ pe__set_resource_flags_recursive(child_rsc, pcmk_rsc_removed);
}
- add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
+ add_hash_param(child_rsc->meta, PCMK_META_CLONE_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
@@ -291,90 +292,89 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
return child_rsc;
}
+/*!
+ * \internal
+ * \brief Unpack a nonnegative integer value from a resource meta-attribute
+ *
+ * \param[in] rsc Resource with meta-attribute
+ * \param[in] meta_name Name of meta-attribute to unpack
+ * \param[in] deprecated_name If not NULL, try unpacking this
+ * if \p meta_name is unset
+ * \param[in] default_value Value to use if unset
+ *
+ * \return Integer parsed from resource's specified meta-attribute if a valid
+ * nonnegative integer, \p default_value if unset, or 0 if invalid
+ */
+static int
+unpack_meta_int(const pcmk_resource_t *rsc, const char *meta_name,
+ const char *deprecated_name, int default_value)
+{
+ int integer = default_value;
+ const char *value = g_hash_table_lookup(rsc->meta, meta_name);
+
+ if ((value == NULL) && (deprecated_name != NULL)) {
+ value = g_hash_table_lookup(rsc->meta, deprecated_name);
+ }
+ if (value != NULL) {
+ pcmk__scan_min_int(value, &integer, 0);
+ }
+ return integer;
+}
+
gboolean
-clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
- const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
- const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
-
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- const char *promoted_max = NULL;
- const char *promoted_node_max = NULL;
-
- promoted_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_MAX);
- if (promoted_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_max = g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_MAX_LEGACY);
- }
-
- promoted_node_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_NODEMAX);
- if (promoted_node_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_node_max =
- g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
- }
-
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
// Use 1 as default but 0 for minimum and invalid
- if (promoted_max == NULL) {
- clone_data->promoted_max = 1;
- } else {
- pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_max = unpack_meta_int(rsc, PCMK_META_PROMOTED_MAX,
+ PCMK_XA_PROMOTED_MAX_LEGACY,
+ 1);
// Use 1 as default but 0 for minimum and invalid
- if (promoted_node_max == NULL) {
- clone_data->promoted_node_max = 1;
- } else {
- pcmk__scan_min_int(promoted_node_max,
- &(clone_data->promoted_node_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_NODE_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_node_max =
+ unpack_meta_int(rsc, PCMK_META_PROMOTED_NODE_MAX,
+ PCMK_XA_PROMOTED_NODE_MAX_LEGACY, 1);
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
// Use 1 as default but 0 for minimum and invalid
- if (max_clones_node == NULL) {
- clone_data->clone_node_max = 1;
- } else {
- pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
- }
+ clone_data->clone_node_max = unpack_meta_int(rsc, PCMK_META_CLONE_NODE_MAX,
+ NULL, 1);
/* Use number of nodes (but always at least 1, which is handy for crm_verify
* for a CIB without nodes) as default, but 0 for minimum and invalid
*/
- if (max_clones == NULL) {
- clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
- } else {
- pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
- }
+ clone_data->clone_max = unpack_meta_int(rsc, PCMK_META_CLONE_MAX, NULL,
+ QB_MAX(1, g_list_length(scheduler->nodes)));
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) {
clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
"Clone", rsc->id,
clone_data->flags,
- pe__clone_ordered,
- "pe__clone_ordered");
+ pcmk__clone_ordered,
+ "pcmk__clone_ordered");
}
- if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
- pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && (clone_data->clone_node_max > 1)) {
+
+ pcmk__config_err("Ignoring " PCMK_META_CLONE_NODE_MAX " of %d for %s "
"because anonymous clones support only one instance "
- "per node", rsc->id);
+ "per node", clone_data->clone_node_max, rsc->id);
clone_data->clone_node_max = 1;
}
@@ -382,9 +382,9 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
pe_rsc_trace(rsc, "\tClone is promotable: %s",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
// Clones may contain a single group or primitive
for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
@@ -415,20 +415,20 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
if (clone_data->clone_max <= 0) {
/* Create one child instance so that unpack_find_resource() will hook up
* any orphans up to the parent correctly.
*/
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
}
@@ -439,12 +439,12 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-clone_active(pe_resource_t * rsc, gboolean all)
+clone_active(pcmk_resource_t * rsc, gboolean all)
{
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
@@ -492,27 +492,29 @@ short_print(const char *list, const char *prefix, const char *type,
}
static const char *
-configured_role_str(pe_resource_t * rsc)
+configured_role_str(pcmk_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
- target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
+ pcmk_resource_t *instance = rsc->children->data; // Any instance will do
+
+ target_role = g_hash_table_lookup(instance->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
-configured_role(pe_resource_t * rsc)
+configured_role(pcmk_resource_t *rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
/*!
@@ -520,7 +522,7 @@ configured_role(pe_resource_t * rsc)
* \deprecated This function will be removed in a future release
*/
static void
-clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
@@ -530,19 +532,20 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%s<clone ", pre_text);
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("multi_state=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -552,7 +555,7 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
}
bool
-is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
+is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any)
{
GList *gIter;
bool all = !any;
@@ -587,7 +590,7 @@ is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
* \deprecated This function will be removed in a future release
*/
void
-clone_print(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GString *list_text = NULL;
@@ -616,9 +619,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
- pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_promotable)? " (promotable)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n<ul>\n");
@@ -629,16 +632,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -652,15 +656,15 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(options, pe_print_clone_active)) {
pcmk__add_word(&stopped_list, 1024, child_rsc->id);
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -668,8 +672,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -678,7 +683,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -709,7 +714,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
@@ -725,17 +730,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
short_print((const char *) list_text->str, child_text,
UNPROMOTED_INSTANCES " (target-role)", NULL,
options, print_data);
@@ -756,11 +761,11 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_STOPPED) {
+ if (role == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -780,7 +785,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
pcmk__add_word(&stopped_list, 1024, node->details->uname);
@@ -809,12 +814,13 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -838,7 +844,7 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
all = g_list_prepend(all, (gpointer) "*");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
@@ -852,16 +858,18 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
printed_header = TRUE;
desc = pe__resource_description(rsc, show_opts);
-
rc = pe__name_and_nvpairs_xml(out, true, "clone", 10,
"id", rsc->id,
- "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "multi_state",
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
"disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored",
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"target_role", configured_role_str(rsc),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
@@ -879,12 +887,13 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -916,7 +925,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
@@ -931,9 +940,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -947,7 +957,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(show_opts, pcmk_show_clone_detail)
&& pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
if (stopped == NULL) {
@@ -956,9 +966,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped"));
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -966,8 +976,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -976,7 +987,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -1014,7 +1025,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1037,7 +1048,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1052,10 +1063,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if ((list_text != NULL) && (list_text->len > 0)) {
clone_header(out, &rc, rsc, clone_data, desc);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
out->list_item(out, NULL,
UNPROMOTED_INSTANCES " (target-role): [ %s ]",
(const char *) list_text->str);
@@ -1075,7 +1086,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -1096,7 +1107,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(node->details->uname, only_node,
@@ -1104,7 +1115,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname);
const char *state = "Stopped";
- if (configured_role(rsc) == RSC_ROLE_STOPPED) {
+ if (configured_role(rsc) == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
@@ -1166,7 +1177,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
void
-clone_free(pe_resource_t * rsc)
+clone_free(pcmk_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
@@ -1175,7 +1186,7 @@ clone_free(pe_resource_t * rsc)
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -1200,13 +1211,13 @@ clone_free(pe_resource_t * rsc)
}
enum rsc_role_e
-clone_resource_state(const pe_resource_t * rsc, gboolean current)
+clone_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e clone_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
@@ -1222,17 +1233,17 @@ clone_resource_state(const pe_resource_t * rsc, gboolean current)
* \internal
* \brief Check whether a clone has an instance for every node
*
- * \param[in] rsc Clone to check
- * \param[in] data_set Cluster state
+ * \param[in] rsc Clone to check
+ * \param[in] scheduler Scheduler data
*/
bool
-pe__is_universal_clone(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__is_universal_clone(const pcmk_resource_t *rsc,
+ const pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
- if (clone_data->clone_max == g_list_length(data_set->nodes)) {
+ if (clone_data->clone_max == g_list_length(scheduler->nodes)) {
return TRUE;
}
}
@@ -1240,7 +1251,7 @@ pe__is_universal_clone(const pe_resource_t *rsc,
}
gboolean
-pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1256,9 +1267,9 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = NULL;
+ const pcmk_resource_t *child_rsc = NULL;
- child_rsc = (const pe_resource_t *) iter->data;
+ child_rsc = (const pcmk_resource_t *) iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
@@ -1270,7 +1281,7 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
}
const char *
-pe__clone_child_id(const pe_resource_t *rsc)
+pe__clone_child_id(const pcmk_resource_t *rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
@@ -1286,12 +1297,12 @@ pe__clone_child_id(const pe_resource_t *rsc)
* \return true if clone is ordered, otherwise false
*/
bool
-pe__clone_is_ordered(const pe_resource_t *clone)
+pe__clone_is_ordered(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
- return pcmk_is_set(clone_data->flags, pe__clone_ordered);
+ return pcmk_is_set(clone_data->flags, pcmk__clone_ordered);
}
/*!
@@ -1305,7 +1316,7 @@ pe__clone_is_ordered(const pe_resource_t *clone)
* already set or pcmk_rc_already if it was)
*/
int
-pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
+pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag)
{
clone_variant_data_t *clone_data = NULL;
@@ -1321,6 +1332,26 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
/*!
* \internal
+ * \brief Check whether a clone flag is set
+ *
+ * \param[in] clone Clone resource to check
+ * \param[in] flags Flag or flags to check
+ *
+ * \return \c true if all \p flags are set for \p clone, otherwise \c false
+ */
+bool
+pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+ CRM_ASSERT(clone_data != NULL);
+
+ return pcmk_all_flags_set(clone_data->flags, flags);
+}
+
+/*!
+ * \internal
* \brief Create pseudo-actions needed for promotable clones
*
* \param[in,out] clone Promotable clone to create actions for
@@ -1328,63 +1359,59 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
* \param[in] any_demoting Whether any instance will be demoted
*/
void
-pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
+pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting,
bool any_demoting)
{
- pe_action_t *action = NULL;
- pe_action_t *action_complete = NULL;
+ pcmk_action_t *action = NULL;
+ pcmk_action_t *action_complete = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
// Create a "promote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting,
- true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTE,
+ !any_promoting, true);
// Create a "promoted" action for when all promotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTED,
!any_promoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for promotion
if (clone_data->promote_notify == NULL) {
clone_data->promote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_PROMOTE,
+ PCMK_ACTION_PROMOTE,
action,
action_complete);
}
// Create a "demote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTE,
+ !any_demoting, true);
// Create a "demoted" action for when all demotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTED,
!any_demoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for demotion
if (clone_data->demote_notify == NULL) {
clone_data->demote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_DEMOTE,
+ PCMK_ACTION_DEMOTE,
action,
action_complete);
if (clone_data->promote_notify != NULL) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->start_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->start_notify->pre,
- pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->stop_notify->pre,
- pe_order_optional);
+ clone_data->stop_notify->pre, pcmk__ar_ordered);
}
}
}
@@ -1396,7 +1423,7 @@ pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
* \param[in,out] clone Clone to create notifications for
*/
void
-pe__create_clone_notifications(pe_resource_t *clone)
+pe__create_clone_notifications(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1415,7 +1442,7 @@ pe__create_clone_notifications(pe_resource_t *clone)
* \param[in,out] clone Clone to free notification data for
*/
void
-pe__free_clone_notification_data(pe_resource_t *clone)
+pe__free_clone_notification_data(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1445,26 +1472,45 @@ pe__free_clone_notification_data(pe_resource_t *clone)
* \param[in,out] stopped Stopped action for \p clone
*/
void
-pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
- pe_action_t *start, pe_action_t *started,
- pe_action_t *stop, pe_action_t *stopped)
+pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
+ pcmk_action_t *start, pcmk_action_t *started,
+ pcmk_action_t *stop, pcmk_action_t *stopped)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
if (clone_data->start_notify == NULL) {
- clone_data->start_notify = pe__action_notif_pseudo_ops(clone, RSC_START,
+ clone_data->start_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_START,
start, started);
}
if (clone_data->stop_notify == NULL) {
- clone_data->stop_notify = pe__action_notif_pseudo_ops(clone, RSC_STOP,
+ clone_data->stop_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_STOP,
stop, stopped);
if ((clone_data->start_notify != NULL)
&& (clone_data->stop_notify != NULL)) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->start_notify->pre, pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
}
}
}
+
+/*!
+ * \internal
+ * \brief Get maximum clone resource instances per node
+ *
+ * \param[in] rsc Clone resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__clone_max_per_node(const pcmk_resource_t *rsc)
+{
+ const clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+ return clone_data->clone_node_max;
+}
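/* Editor's sketch (not part of the patch): how a caller might combine the
 * new pe__clone_flag_is_set() and pe__clone_max_per_node() helpers added
 * above. "my_clone" is a hypothetical, already-unpacked clone resource.
 */
static void
log_clone_limits(const pcmk_resource_t *my_clone)
{
    unsigned int per_node = pe__clone_max_per_node(my_clone);

    // pcmk__clone_ordered is the flag renamed from pe__clone_ordered above
    if (pe__clone_flag_is_set(my_clone, pcmk__clone_ordered)) {
        crm_trace("%s starts instances serially, at most %u per node",
                  my_clone->id, per_node);
    } else {
        crm_trace("%s starts instances in parallel, at most %u per node",
                  my_clone->id, per_node);
    }
}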
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index 6c69bfc..0fdd5a1 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,6 +15,7 @@
#include <glib.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
gboolean was_processing_error = FALSE;
@@ -104,7 +105,7 @@ static pcmk__cluster_option_t pe_opts[] = {
},
{
"stonith-action", NULL, "select", "reboot, off, poweroff",
- "reboot", pcmk__is_fencing_action,
+ PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
N_("Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")"),
NULL
@@ -157,7 +158,17 @@ static pcmk__cluster_option_t pe_opts[] = {
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled.")
},
-
+ {
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("How long to wait for a node that has joined the cluster to join "
+ "the controller process group"),
+ N_("Fence nodes that do not join the controller process group within "
+ "this much time after joining the cluster, to allow the cluster "
+ "to continue managing resources. A value of 0 means never fence "
+ "pending nodes. Setting the value to 2h means fence nodes after "
+ "2 hours.")
+ },
{
"cluster-delay", NULL, "time", NULL,
"60s", pcmk__valid_interval_spec,
@@ -311,34 +322,34 @@ fail2text(enum action_fail_response fail)
const char *result = "<unknown>";
switch (fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
result = "ignore";
break;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
result = "demote";
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
result = "block";
break;
- case action_fail_recover:
+ case pcmk_on_fail_restart:
result = "recover";
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
result = "migrate";
break;
- case action_fail_stop:
+ case pcmk_on_fail_stop:
result = "stop";
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
result = "fence";
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
result = "standby";
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
result = "restart-container";
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
result = "reset-remote";
break;
}
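/* Editor's sketch (not part of the patch): the enum values are renamed, but
 * the user-facing strings stay the same, so the renamed pcmk_on_fail_restart
 * still maps to the historical "recover" text (and pcmk_on_fail_ban to
 * "migrate"):
 */
CRM_ASSERT(pcmk__str_eq(fail2text(pcmk_on_fail_restart), "recover",
                        pcmk__str_none));
CRM_ASSERT(pcmk__str_eq(fail2text(pcmk_on_fail_ban), "migrate",
                        pcmk__str_none));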
@@ -348,49 +359,46 @@ fail2text(enum action_fail_response fail)
enum action_tasks
text2task(const char *task)
{
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- return stop_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
- return stopped_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
- return start_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
- return started_rsc;
- } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
- return shutdown_crm;
- } else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
- return stonith_node;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return monitor_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
- return action_notify;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
- return action_notified;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- return action_promote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
- return action_demote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
- return action_promoted;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
- return action_demoted;
- }
-#if SUPPORT_TRACING
- if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
- return no_action;
- }
- crm_trace("Unsupported action: %s", task);
-#endif
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ return pcmk_action_stop;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) {
+ return pcmk_action_stopped;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) {
+ return pcmk_action_start;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) {
+ return pcmk_action_started;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) {
+ return pcmk_action_shutdown;
- return no_action;
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
+ return pcmk_action_fence;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
+ return pcmk_action_monitor;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
+ return pcmk_action_notify;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) {
+ return pcmk_action_notified;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
+ return pcmk_action_promote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
+ return pcmk_action_demote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) {
+ return pcmk_action_promoted;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) {
+ return pcmk_action_demoted;
+ }
+ return pcmk_action_unspecified;
}
const char *
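/* Editor's sketch (not part of the patch): text2task() and task2text()
 * (below) remain inverses under the new PCMK_ACTION_* names, and unknown
 * strings now fall through to pcmk_action_unspecified rather than the
 * removed SUPPORT_TRACING branch:
 */
CRM_ASSERT(text2task(PCMK_ACTION_START) == pcmk_action_start);
CRM_ASSERT(pcmk__str_eq(task2text(pcmk_action_start), PCMK_ACTION_START,
                        pcmk__str_none));
CRM_ASSERT(text2task("not-a-task") == pcmk_action_unspecified);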
@@ -399,47 +407,47 @@ task2text(enum action_tasks task)
const char *result = "<unknown>";
switch (task) {
- case no_action:
+ case pcmk_action_unspecified:
result = "no_action";
break;
- case stop_rsc:
- result = CRMD_ACTION_STOP;
+ case pcmk_action_stop:
+ result = PCMK_ACTION_STOP;
break;
- case stopped_rsc:
- result = CRMD_ACTION_STOPPED;
+ case pcmk_action_stopped:
+ result = PCMK_ACTION_STOPPED;
break;
- case start_rsc:
- result = CRMD_ACTION_START;
+ case pcmk_action_start:
+ result = PCMK_ACTION_START;
break;
- case started_rsc:
- result = CRMD_ACTION_STARTED;
+ case pcmk_action_started:
+ result = PCMK_ACTION_RUNNING;
break;
- case shutdown_crm:
- result = CRM_OP_SHUTDOWN;
+ case pcmk_action_shutdown:
+ result = PCMK_ACTION_DO_SHUTDOWN;
break;
- case stonith_node:
- result = CRM_OP_FENCE;
+ case pcmk_action_fence:
+ result = PCMK_ACTION_STONITH;
break;
- case monitor_rsc:
- result = CRMD_ACTION_STATUS;
+ case pcmk_action_monitor:
+ result = PCMK_ACTION_MONITOR;
break;
- case action_notify:
- result = CRMD_ACTION_NOTIFY;
+ case pcmk_action_notify:
+ result = PCMK_ACTION_NOTIFY;
break;
- case action_notified:
- result = CRMD_ACTION_NOTIFIED;
+ case pcmk_action_notified:
+ result = PCMK_ACTION_NOTIFIED;
break;
- case action_promote:
- result = CRMD_ACTION_PROMOTE;
+ case pcmk_action_promote:
+ result = PCMK_ACTION_PROMOTE;
break;
- case action_promoted:
- result = CRMD_ACTION_PROMOTED;
+ case pcmk_action_promoted:
+ result = PCMK_ACTION_PROMOTED;
break;
- case action_demote:
- result = CRMD_ACTION_DEMOTE;
+ case pcmk_action_demote:
+ result = PCMK_ACTION_DEMOTE;
break;
- case action_demoted:
- result = CRMD_ACTION_DEMOTED;
+ case pcmk_action_demoted:
+ result = PCMK_ACTION_DEMOTED;
break;
}
@@ -450,50 +458,50 @@ const char *
role2text(enum rsc_role_e role)
{
switch (role) {
- case RSC_ROLE_UNKNOWN:
- return RSC_ROLE_UNKNOWN_S;
- case RSC_ROLE_STOPPED:
- return RSC_ROLE_STOPPED_S;
- case RSC_ROLE_STARTED:
- return RSC_ROLE_STARTED_S;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_stopped:
+ return PCMK__ROLE_STOPPED;
+
+ case pcmk_role_started:
+ return PCMK__ROLE_STARTED;
+
+ case pcmk_role_unpromoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_UNPROMOTED_LEGACY_S;
+ return PCMK__ROLE_UNPROMOTED_LEGACY;
#else
- return RSC_ROLE_UNPROMOTED_S;
+ return PCMK__ROLE_UNPROMOTED;
#endif
- case RSC_ROLE_PROMOTED:
+
+ case pcmk_role_promoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_PROMOTED_LEGACY_S;
+ return PCMK__ROLE_PROMOTED_LEGACY;
#else
- return RSC_ROLE_PROMOTED_S;
+ return PCMK__ROLE_PROMOTED;
#endif
+
+ default: // pcmk_role_unknown
+ return PCMK__ROLE_UNKNOWN;
}
- CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
- CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
- // coverity[dead_error_line]
- return RSC_ROLE_UNKNOWN_S;
}
enum rsc_role_e
text2role(const char *role)
{
CRM_ASSERT(role != NULL);
- if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
- return RSC_ROLE_STOPPED;
- } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
- return RSC_ROLE_STARTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_UNPROMOTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_PROMOTED;
- } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
- return RSC_ROLE_UNKNOWN;
+ if (pcmk__str_eq(role, PCMK__ROLE_STOPPED, pcmk__str_casei)) {
+ return pcmk_role_stopped;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_STARTED, pcmk__str_casei)) {
+ return pcmk_role_started;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ return pcmk_role_unpromoted;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
+ return pcmk_role_promoted;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN, pcmk__str_casei)) {
+ return pcmk_role_unknown;
}
crm_err("Unknown role: %s", role);
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
void
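/* Editor's sketch (not part of the patch): text2role() accepts both current
 * and legacy promoted/unpromoted spellings, so a round trip through
 * role2text() is stable whether or not PCMK__COMPAT_2_0 selects the legacy
 * strings:
 */
CRM_ASSERT(text2role(role2text(pcmk_role_promoted)) == pcmk_role_promoted);
CRM_ASSERT(text2role(PCMK__ROLE_UNPROMOTED_LEGACY) == pcmk_role_unpromoted);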
@@ -514,48 +522,103 @@ add_hash_param(GHashTable * hash, const char *name, const char *value)
}
}
+/*!
+ * \internal
+ * \brief Look up an attribute value on the appropriate node
+ *
+ * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
+ * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
+ * attribute on the node's host. Otherwise, query the attribute on \p node
+ * itself.
+ *
+ * \param[in] node Node to query attribute value on by default
+ * \param[in] name Name of attribute to query
+ * \param[in] rsc Resource on whose behalf we're querying
+ * \param[in] node_type Type of resource location lookup
+ * \param[in] force_host Force a lookup on the guest node's host, regardless of
+ * the \c XML_RSC_ATTR_TARGET value
+ *
+ * \return Value of the attribute on \p node or on the host of \p node
+ *
+ * \note If \p force_host is \c true, \p node \e must be a guest node.
+ */
const char *
-pe_node_attribute_calculated(const pe_node_t *node, const char *name,
- const pe_resource_t *rsc)
+pe__node_attribute_calculated(const pcmk_node_t *node, const char *name,
+ const pcmk_resource_t *rsc,
+ enum pcmk__rsc_node node_type,
+ bool force_host)
{
- const char *source;
-
- if(node == NULL) {
- return NULL;
+ // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
+ bool is_guest = (node != NULL)
+ && (node->details->type == pcmk_node_variant_remote)
+ && (node->details->remote_rsc != NULL)
+ && (node->details->remote_rsc->container != NULL);
+ const char *source = NULL;
+ const char *node_type_s = NULL;
+ const char *reason = NULL;
+
+ const pcmk_resource_t *container = NULL;
+ const pcmk_node_t *host = NULL;
+
+ CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
+ && (!force_host || is_guest));
+
+ /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a
+ * user configuration error.
+ */
+ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+ if (!force_host
+ && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
- } else if(rsc == NULL) {
return g_hash_table_lookup(node->details->attrs, name);
}
- source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
- if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
- return g_hash_table_lookup(node->details->attrs, name);
- }
+ container = node->details->remote_rsc->container;
- /* Use attributes set for the containers location
- * instead of for the container itself
- *
- * Useful when the container is using the host's local
- * storage
- */
+ switch (node_type) {
+ case pcmk__rsc_node_assigned:
+ node_type_s = "assigned";
+ host = container->allocated_to;
+ if (host == NULL) {
+ reason = "not assigned";
+ }
+ break;
- CRM_ASSERT(node->details->remote_rsc);
- CRM_ASSERT(node->details->remote_rsc->container);
+ case pcmk__rsc_node_current:
+ node_type_s = "current";
- if(node->details->remote_rsc->container->running_on) {
- pe_node_t *host = node->details->remote_rsc->container->running_on->data;
- pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s",
- rsc->id, name, pe__node_name(host));
- return g_hash_table_lookup(host->details->attrs, name);
+ if (container->running_on != NULL) {
+ host = container->running_on->data;
+ }
+ if (host == NULL) {
+ reason = "inactive";
+ }
+ break;
+
+ default:
+ // Add support for other enum pcmk__rsc_node values if needed
+ CRM_ASSERT(false);
+ break;
}
- pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive",
- rsc->id, name, node->details->remote_rsc->container->id);
+ if (host != NULL) {
+ const char *value = g_hash_table_lookup(host->details->attrs, name);
+
+ pe_rsc_trace(rsc,
+ "%s: Value lookup for %s on %s container host %s %s%s",
+ rsc->id, name, node_type_s, pe__node_name(host),
+ ((value != NULL)? "succeeded: " : "failed"),
+ pcmk__s(value, ""));
+ return value;
+ }
+ pe_rsc_trace(rsc,
+ "%s: Not looking for %s on %s container host: %s is %s",
+ rsc->id, name, node_type_s, container->id, reason);
return NULL;
}
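/* Editor's sketch (not part of the patch): a minimal caller of the widened
 * pe__node_attribute_calculated() above; "rsc", "node", and the "my-attr"
 * attribute name are hypothetical, taken from an already-unpacked data set.
 */
static const char *
lookup_attr_for_rsc(const pcmk_node_t *node, const pcmk_resource_t *rsc)
{
    // Query where the resource is now; honors the "host" target for guests
    return pe__node_attribute_calculated(node, "my-attr", rsc,
                                         pcmk__rsc_node_current, false);
}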
const char *
-pe_node_attribute_raw(const pe_node_t *node, const char *name)
+pe_node_attribute_raw(const pcmk_node_t *node, const char *name)
{
if(node == NULL) {
return NULL;
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index f168124..0ab2e04 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -13,15 +13,17 @@
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
-static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
- unsigned int *count_clean);
+static pcmk_node_t *active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
-resource_object_functions_t resource_class_functions[] = {
+pcmk_rsc_methods_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
@@ -34,6 +36,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__native_is_filtered,
active_node,
+ pe__primitive_max_per_node,
},
{
group_unpack,
@@ -47,6 +50,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__group_is_filtered,
active_node,
+ pe__group_max_per_node,
},
{
clone_unpack,
@@ -60,6 +64,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__clone_is_filtered,
active_node,
+ pe__clone_max_per_node,
},
{
pe__unpack_bundle,
@@ -73,6 +78,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
+ pe__bundle_max_per_node,
}
};
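/* Editor's sketch (not part of the patch): with the per-variant
 * max-per-node implementations registered as the last method above, callers
 * can dispatch through the method table instead of switching on the
 * variant; the member name max_per_node is an assumption here, and "rsc" is
 * a hypothetical resource.
 */
unsigned int limit = rsc->fns->max_per_node(rsc); // e.g., 1 for primitives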
@@ -80,23 +86,23 @@ static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
- return pe_native;
+ return pcmk_rsc_variant_primitive;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
- return pe_group;
+ return pcmk_rsc_variant_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
- return pe_container;
+ return pcmk_rsc_variant_bundle;
}
- return pe_unknown;
+ return pcmk_rsc_variant_unknown;
}
static void
@@ -106,10 +112,12 @@ dup_attr(gpointer key, gpointer value, gpointer user_data)
}
static void
-expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
+expand_parents_fixed_nvpairs(pcmk_resource_t *rsc,
+ pe_rule_eval_data_t *rule_data,
+ GHashTable *meta_hash, pcmk_scheduler_t *scheduler)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
- pe_resource_t *p = rsc->parent;
+ pcmk_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
@@ -119,8 +127,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
/* The fixed value of the lower parent resource takes precedence and is not overwritten. */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
- pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
- rule_data, parent_orig_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS, rule_data,
+ parent_orig_meta, NULL, FALSE, scheduler);
p = p->parent;
}
@@ -146,8 +154,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
}
void
-get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
- pe_node_t * node, pe_working_set_t * data_set)
+get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
@@ -157,8 +165,8 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
@@ -170,23 +178,23 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
- const char *prop_value = crm_element_value(rsc->xml, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(a);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
/* If it is already explicitly set as a child, it will not be overwritten. */
if (rsc->parent != NULL) {
- expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
+ expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler);
}
/* check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_META_SETS,
+ &rule_data, meta_hash, NULL, FALSE, scheduler);
/* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
/* The values already set up to this point will not be overwritten. */
@@ -196,13 +204,13 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
}
void
-get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -213,16 +221,17 @@ get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
- get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
+ get_rsc_attributes(meta_hash, rsc->parent, node, scheduler);
} else {
/* and finally check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_ATTR_SETS,
+ &rule_data, meta_hash, NULL, FALSE,
+ scheduler);
}
}
@@ -234,9 +243,9 @@ template_op_key(xmlNode * op)
char *key = NULL;
if ((role == NULL)
- || pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- role = RSC_ROLE_UNKNOWN_S;
+ || pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
@@ -244,7 +253,8 @@ template_op_key(xmlNode * op)
}
static gboolean
-unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
+unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
@@ -268,7 +278,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -277,7 +287,8 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
return FALSE;
}
- cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
+ cib_resources = get_xpath_object("//" XML_CIB_TAG_RESOURCES,
+ scheduler->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
@@ -292,7 +303,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
- crm_xml_replace(new_xml, XML_ATTR_ID, id);
+ crm_xml_add(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
@@ -346,19 +357,19 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
- /* Disable multi-level templates for now */
- /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
+#if 0 /* Disable multi-level templates for now */
+ if (!unpack_template(new_xml, expanded_xml, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
-
return FALSE;
- } */
+ }
+#endif
return TRUE;
}
static gboolean
-add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
+add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *template_ref = NULL;
const char *id = NULL;
@@ -375,7 +386,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -384,7 +395,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
return FALSE;
}
- if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
+ if (add_tag_ref(scheduler->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
@@ -392,7 +403,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
}
static bool
-detect_promotable(pe_resource_t *rsc)
+detect_promotable(pcmk_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
@@ -402,8 +413,7 @@ detect_promotable(pe_resource_t *rsc)
}
// @COMPAT deprecated since 2.0.0
- if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
- pcmk__str_casei)) {
+ if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
@@ -423,18 +433,18 @@ free_params_table(gpointer data)
/*!
* \brief Get a table of resource parameters
*
- * \param[in,out] rsc Resource to query
- * \param[in] node Node for evaluating rules (NULL for defaults)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to query
+ * \param[in] node Node for evaluating rules (NULL for defaults)
+ * \param[in,out] scheduler Scheduler data
*
* \return Hash table containing resource parameter names and values
- * (or NULL if \p rsc or \p data_set is NULL)
+ * (or NULL if \p rsc or \p scheduler is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
-pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
GHashTable *params_on_node = NULL;
@@ -445,7 +455,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
const char *node_name = "";
// Sanity check
- if ((rsc == NULL) || (data_set == NULL)) {
+ if ((rsc == NULL) || (scheduler == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
@@ -462,7 +472,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
- get_rsc_attributes(params_on_node, rsc, node, data_set);
+ get_rsc_attributes(params_on_node, rsc, node, scheduler);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
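/* Editor's sketch (not part of the patch): pe_rsc_params() caches evaluated
 * parameters per node name, so repeated calls return the same table, which
 * callers must not free; "rsc", "node", and "scheduler" are hypothetical.
 */
GHashTable *first = pe_rsc_params(rsc, node, scheduler);
GHashTable *second = pe_rsc_params(rsc, node, scheduler);
CRM_ASSERT(first == second); // second call is served from parameter_cache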
@@ -478,29 +488,30 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
* \param[in] is_default Whether \p value was selected by default
*/
static void
-unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
+unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing);
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
- } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
@@ -508,27 +519,29 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
return;
} else {
- pe__set_resource_flags(rsc,
- pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing
+ |pcmk_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
- } else if ((rsc->variant == pe_native)
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
value = PCMK__VALUE_FENCING;
- } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
+ } else if (rsc->cluster->no_quorum_policy == pcmk_no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
@@ -550,18 +563,18 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
#ifndef PCMK__COMPAT_2_0
static void
-warn_about_deprecated_classes(pe_resource_t *rsc)
+warn_about_deprecated_classes(pcmk_resource_t *rsc)
{
const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
- pe_warn_once(pe_wo_upstart,
+ pe_warn_once(pcmk__wo_upstart,
"Support for Upstart resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
- pe_warn_once(pe_wo_nagios,
+ pe_warn_once(pcmk__wo_nagios,
"Support for Nagios resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
@@ -574,12 +587,12 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
- * \c pe_resource_t object.
+ * \c pcmk_resource_t object.
*
- * \param[in] xml_obj XML node containing the resource's configuration
- * \param[out] rsc Where to store the unpacked resource information
- * \param[in] parent Resource's parent, if any
- * \param[in,out] data_set Cluster working set
+ * \param[in] xml_obj XML node containing the resource's configuration
+ * \param[out] rsc Where to store the unpacked resource information
+ * \param[in] parent Resource's parent, if any
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
@@ -587,8 +600,8 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
-pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set)
+pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent, pcmk_scheduler_t *scheduler)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
@@ -599,7 +612,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
@@ -607,31 +620,31 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
};
CRM_CHECK(rsc != NULL, return EINVAL);
- CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
+ CRM_CHECK((xml_obj != NULL) && (scheduler != NULL),
*rsc = NULL;
return EINVAL);
- rule_data.now = data_set->now;
+ rule_data.now = scheduler->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
- if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
+ if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) {
return pcmk_rc_unpack_error;
}
- *rsc = calloc(1, sizeof(pe_resource_t));
+ *rsc = calloc(1, sizeof(pcmk_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
- (*rsc)->cluster = data_set;
+ (*rsc)->cluster = scheduler;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
@@ -648,12 +661,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
- (*rsc)->ops_xml = expand_idref(ops, data_set->input);
+ (*rsc)->ops_xml = expand_idref(ops, scheduler->input);
- (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
- if ((*rsc)->variant == pe_unknown) {
+ (*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
+ if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
- id, crm_element_name((*rsc)->xml));
+ id, (*rsc)->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -678,23 +691,23 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
- get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
- (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
+ get_meta_attributes((*rsc)->meta, *rsc, NULL, scheduler);
+ (*rsc)->parameters = pe_rsc_params(*rsc, NULL, scheduler); // \deprecated
(*rsc)->flags = 0;
- pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
+ pe__set_resource_flags(*rsc, pcmk_rsc_runnable|pcmk_rsc_unassigned);
- if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
- (*rsc)->role = RSC_ROLE_STOPPED;
- (*rsc)->next_role = RSC_ROLE_UNKNOWN;
+ (*rsc)->role = pcmk_role_stopped;
+ (*rsc)->next_role = pcmk_role_unknown;
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
@@ -704,12 +717,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_critical);
+ pe__set_resource_flags(*rsc, pcmk_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_notify);
+ pe__set_resource_flags(*rsc, pcmk_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
@@ -723,7 +736,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
@@ -732,38 +745,38 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
*/
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
} else {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
if (detect_promotable(*rsc)) {
- pe__set_resource_flags(*rsc, pe_rsc_promotable);
+ pe__set_resource_flags(*rsc, pcmk_rsc_promotable);
}
} else {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
@@ -771,7 +784,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
- pe_warn_once(pe_wo_restart_type,
+ pe_warn_once(pcmk__wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
@@ -782,17 +795,17 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_only;
+ (*rsc)->recovery_type = pcmk_multiply_active_stop;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_block;
+ (*rsc)->recovery_type = pcmk_multiply_active_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_unexpected;
+ (*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
@@ -803,7 +816,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
@@ -813,7 +826,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->stickiness = char2score(value);
}
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
@@ -821,8 +834,8 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
- pe_warn_once(pe_wo_neg_threshold,
- XML_RSC_ATTR_FAIL_STICKINESS
+ pe_warn_once(pcmk__wo_neg_threshold,
+ PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
@@ -830,21 +843,21 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
- pe__set_resource_flags(*rsc, pe_rsc_fence_device);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
+ pe__set_resource_flags(*rsc, pcmk_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
- GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
+ GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
@@ -865,34 +878,35 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
- (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
+ (*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
- if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
+ if ((*rsc)->fns->unpack(*rsc, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
- resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
+ resource_location(*rsc, NULL, 0, "symmetric_default", scheduler);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
- resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
+ resource_location(*rsc, NULL, 0, "remote_connection_default",
+ scheduler);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
- pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
+ pcmk_is_set((*rsc)->flags, pcmk_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
- (*rsc)->utilization, NULL, FALSE, data_set);
+ (*rsc)->utilization, NULL, FALSE, scheduler);
if (expanded_xml) {
- if (add_template_rsc(xml_obj, data_set) == FALSE) {
+ if (add_template_rsc(xml_obj, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -902,9 +916,9 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
}
gboolean
-is_parent(pe_resource_t *child, pe_resource_t *rsc)
+is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc)
{
- pe_resource_t *parent = child;
+ pcmk_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
@@ -918,15 +932,16 @@ is_parent(pe_resource_t *child, pe_resource_t *rsc)
return FALSE;
}
-pe_resource_t *
-uber_parent(pe_resource_t * rsc)
+pcmk_resource_t *
+uber_parent(pcmk_resource_t *rsc)
{
- pe_resource_t *parent = rsc;
+ pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
- while (parent->parent != NULL && parent->parent->variant != pe_container) {
+ while ((parent->parent != NULL)
+ && (parent->parent->variant != pcmk_rsc_variant_bundle)) {
parent = parent->parent;
}
return parent;
@@ -943,16 +958,17 @@ uber_parent(pe_resource_t * rsc)
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
-const pe_resource_t *
-pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
+const pcmk_resource_t *
+pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
{
- const pe_resource_t *parent = rsc;
+ const pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
- if (!include_bundle && (parent->parent->variant == pe_container)) {
+ if (!include_bundle
+ && (parent->parent->variant == pcmk_rsc_variant_bundle)) {
break;
}
parent = parent->parent;
@@ -961,7 +977,7 @@ pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
}
void
-common_free(pe_resource_t * rsc)
+common_free(pcmk_resource_t * rsc)
{
if (rsc == NULL) {
return;
@@ -984,7 +1000,9 @@ common_free(pe_resource_t * rsc)
g_hash_table_destroy(rsc->utilization);
}
- if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc->parent == NULL)
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
@@ -1037,8 +1055,8 @@ common_free(pe_resource_t * rsc)
* \return true if the count should continue, or false if sufficiently known
*/
bool
-pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
- pe_node_t **active, unsigned int *count_all,
+pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
@@ -1065,7 +1083,7 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
} else {
keep_looking = true;
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
@@ -1079,12 +1097,12 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
return keep_looking;
}
-// Shared implementation of resource_object_functions_t:active_node()
-static pe_node_t *
-active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Shared implementation of pcmk_rsc_methods_t:active_node()
+static pcmk_node_t *
+active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
+ pcmk_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
@@ -1096,7 +1114,7 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
+ if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
@@ -1117,8 +1135,8 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
-pe_node_t *
-pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
+pcmk_node_t *
+pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
@@ -1126,7 +1144,7 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
return NULL;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
@@ -1135,20 +1153,20 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
void
-pe__count_common(pe_resource_t *rsc)
+pe__count_common(pcmk_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
- ((pe_resource_t *) item->data)->fns->count(item->data);
+ ((pcmk_resource_t *) item->data)->fns->count(item->data);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
- || (rsc->role > RSC_ROLE_STOPPED)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
+ || (rsc->role > pcmk_role_stopped)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
rsc->cluster->blocked_resources++;
}
}
@@ -1163,7 +1181,7 @@ pe__count_common(pe_resource_t *rsc)
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
-pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
+pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
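(Aside, before the failcounts.c diff: the uber_parent() and pe__const_top_resource() hunks above share the same upward walk that stops just below a bundle. What follows is a minimal, self-contained sketch of that traversal using illustrative stand-in types, not Pacemaker's real pcmk_resource_t.)

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for pcmk_resource_t and its variant field */
enum variant { VARIANT_PRIMITIVE, VARIANT_GROUP, VARIANT_CLONE, VARIANT_BUNDLE };

struct rsc {
    const char *id;
    enum variant variant;
    struct rsc *parent;
};

/* Walk up the parent chain, stopping below any bundle -- the same loop
 * shape as uber_parent() in the hunk above */
static struct rsc *
top_below_bundle(struct rsc *rsc)
{
    struct rsc *parent = rsc;

    if (parent == NULL) {
        return NULL;
    }
    while ((parent->parent != NULL)
           && (parent->parent->variant != VARIANT_BUNDLE)) {
        parent = parent->parent;
    }
    return parent;
}

int
main(void)
{
    struct rsc bundle = { "b1", VARIANT_BUNDLE, NULL };
    struct rsc clone  = { "c1", VARIANT_CLONE, &bundle };
    struct rsc prim   = { "p1", VARIANT_PRIMITIVE, &clone };

    /* The clone is returned because its parent is a bundle */
    printf("%s\n", top_below_bundle(&prim)->id); // prints "c1"
    return 0;
}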
diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c
index a4a3e11..6990d3d 100644
--- a/lib/pengine/failcounts.c
+++ b/lib/pengine/failcounts.c
@@ -77,7 +77,8 @@ is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml,
}
static gboolean
-block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
+block_failure(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ const xmlNode *xml_op)
{
char *xml_name = clone_strip(rsc->id);
@@ -180,11 +181,11 @@ block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
* \note The caller is responsible for freeing the result.
*/
static inline char *
-rsc_fail_name(const pe_resource_t *rsc)
+rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? strdup(name) : clone_strip(name);
}
/*!
@@ -236,7 +237,6 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* \brief Compile regular expressions to match failure-related node attributes
*
* \param[in] rsc Resource being checked for failures
- * \param[in] data_set Data set (for CRM feature set version)
* \param[out] failcount_re Storage for regular expression for fail count
* \param[out] lastfailure_re Storage for regular expression for last failure
*
@@ -245,23 +245,25 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* regfree().
*/
static int
-generate_fail_regexes(const pe_resource_t *rsc,
- const pe_working_set_t *data_set,
+generate_fail_regexes(const pcmk_resource_t *rsc,
regex_t *failcount_re, regex_t *lastfailure_re)
{
+ int rc = pcmk_rc_ok;
char *rsc_name = rsc_fail_name(rsc);
- const char *version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ const char *version = crm_element_value(rsc->cluster->input,
+ XML_ATTR_CRM_VERSION);
+
+ // @COMPAT Pacemaker <= 1.1.16 used a single fail count per resource
gboolean is_legacy = (compare_version(version, "3.0.13") < 0);
- int rc = pcmk_rc_ok;
if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
failcount_re) != pcmk_rc_ok) {
rc = EINVAL;
} else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name,
is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
lastfailure_re) != pcmk_rc_ok) {
rc = EINVAL;
regfree(failcount_re);
@@ -271,68 +273,137 @@ generate_fail_regexes(const pe_resource_t *rsc,
return rc;
}
-int
-pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
- time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+// Data for fail-count-related iterators
+struct failcount_data {
+ const pcmk_node_t *node; // Node to check for fail count
+ pcmk_resource_t *rsc; // Resource to check for fail count
+ uint32_t flags; // Fail count flags
+ const xmlNode *xml_op; // History entry for expiration purposes (or NULL)
+ regex_t failcount_re; // Fail count regular expression to match
+ regex_t lastfailure_re; // Last failure regular expression to match
+ int failcount; // Fail count so far
+ time_t last_failure; // Time of most recent failure so far
+};
+
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a node attribute
+ *
+ * \param[in] key Node attribute name
+ * \param[in] value Node attribute value
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_attr(gpointer key, gpointer value, gpointer user_data)
{
- char *key = NULL;
- const char *value = NULL;
- regex_t failcount_re, lastfailure_re;
- int failcount = 0;
- time_t last = 0;
- GHashTableIter iter;
-
- CRM_CHECK(generate_fail_regexes(rsc, rsc->cluster, &failcount_re,
- &lastfailure_re) == pcmk_rc_ok,
- return 0);
+ struct failcount_data *fc_data = user_data;
+
+ // If this is a matching fail count attribute, update fail count
+ if (regexec(&(fc_data->failcount_re), (const char *) key, 0, NULL, 0) == 0) {
+ fc_data->failcount = pcmk__add_scores(fc_data->failcount,
+ char2score(value));
+ pe_rsc_trace(fc_data->rsc, "Added %s (%s) to %s fail count (now %s)",
+ (const char *) key, (const char *) value, fc_data->rsc->id,
+ pcmk_readable_score(fc_data->failcount));
+ return;
+ }
- /* Resource fail count is sum of all matching operation fail counts */
- g_hash_table_iter_init(&iter, node->details->attrs);
- while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
- if (regexec(&failcount_re, key, 0, NULL, 0) == 0) {
- failcount = pcmk__add_scores(failcount, char2score(value));
- crm_trace("Added %s (%s) to %s fail count (now %s)",
- key, value, rsc->id, pcmk_readable_score(failcount));
- } else if (regexec(&lastfailure_re, key, 0, NULL, 0) == 0) {
- long long last_ll;
-
- if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
- last = (time_t) QB_MAX(last, last_ll);
- }
+ // If this is a matching last failure attribute, update last failure
+ if (regexec(&(fc_data->lastfailure_re), (const char *) key, 0, NULL,
+ 0) == 0) {
+ long long last_ll;
+
+ if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
+ fc_data->last_failure = (time_t) QB_MAX(fc_data->last_failure,
+ last_ll);
}
}
+}
- regfree(&failcount_re);
- regfree(&lastfailure_re);
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a filler resource
+ *
+ * \param[in] data Filler resource
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_filler(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *filler = data;
+ struct failcount_data *fc_data = user_data;
+ time_t filler_last_failure = 0;
+
+ fc_data->failcount += pe_get_failcount(fc_data->node, filler,
+ &filler_last_failure, fc_data->flags,
+ fc_data->xml_op);
+ fc_data->last_failure = QB_MAX(fc_data->last_failure, filler_last_failure);
+}
- if ((failcount > 0) && (last > 0) && (last_failure != NULL)) {
- *last_failure = last;
- }
+/*!
+ * \internal
+ * \brief Get a resource's fail count on a node
+ *
+ * \param[in] node Node to check
+ * \param[in,out] rsc Resource to check
+ * \param[out] last_failure If not NULL, where to set time of most recent
+ * failure of \p rsc on \p node
+ * \param[in] flags Group of enum pcmk__fc_flags
+ * \param[in] xml_op If not NULL, consider only the action in this
+ * history entry when determining whether on-fail
+ * is configured as "blocked", otherwise consider
+ * all actions configured for \p rsc
+ *
+ * \return Fail count for \p rsc on \p node according to \p flags
+ */
+int
+pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+{
+ struct failcount_data fc_data = {
+ .node = node,
+ .rsc = rsc,
+ .flags = flags,
+ .xml_op = xml_op,
+ .failcount = 0,
+ .last_failure = (time_t) 0,
+ };
+
+ // Calculate resource failcount as sum of all matching operation failcounts
+ CRM_CHECK(generate_fail_regexes(rsc, &fc_data.failcount_re,
+ &fc_data.lastfailure_re) == pcmk_rc_ok,
+ return 0);
+ g_hash_table_foreach(node->details->attrs, update_failcount_for_attr,
+ &fc_data);
+ regfree(&(fc_data.failcount_re));
+ regfree(&(fc_data.lastfailure_re));
- /* If failure blocks the resource, disregard any failure timeout */
- if ((failcount > 0) && rsc->failure_timeout
+ // If failure blocks the resource, disregard any failure timeout
+ if ((fc_data.failcount > 0) && (rsc->failure_timeout > 0)
&& block_failure(node, rsc, xml_op)) {
- pe_warn("Ignoring failure timeout %d for %s because it conflicts with on-fail=block",
+ pe_warn("Ignoring failure timeout %d for %s "
+ "because it conflicts with on-fail=block",
rsc->failure_timeout, rsc->id);
rsc->failure_timeout = 0;
}
- /* If all failures have expired, ignore fail count */
- if (pcmk_is_set(flags, pe_fc_effective) && (failcount > 0) && (last > 0)
- && rsc->failure_timeout) {
+ // If all failures have expired, ignore fail count
+ if (pcmk_is_set(flags, pcmk__fc_effective) && (fc_data.failcount > 0)
+ && (fc_data.last_failure > 0) && (rsc->failure_timeout != 0)) {
time_t now = get_effective_time(rsc->cluster);
- if (now > (last + rsc->failure_timeout)) {
- crm_debug("Failcount for %s on %s expired after %ds",
- rsc->id, pe__node_name(node), rsc->failure_timeout);
- failcount = 0;
+ if (now > (fc_data.last_failure + rsc->failure_timeout)) {
+ pe_rsc_debug(rsc, "Failcount for %s on %s expired after %ds",
+ rsc->id, pe__node_name(node), rsc->failure_timeout);
+ fc_data.failcount = 0;
}
}
- /* We never want the fail counts of a bundle container's fillers to
- * count towards the container's fail count.
+ /* Add the fail count of any filler resources, except that we never want the
+ * fail counts of a bundle container's fillers to count towards the
+ * container's fail count.
*
* Most importantly, a Pacemaker Remote connection to a bundle container
* is a filler of the container, but can reside on a different node than the
@@ -340,62 +411,56 @@ pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
* container's fail count on that node could lead to attempting to stop the
* container on the wrong node.
*/
-
- if (pcmk_is_set(flags, pe_fc_fillers) && rsc->fillers
+ if (pcmk_is_set(flags, pcmk__fc_fillers) && (rsc->fillers != NULL)
&& !pe_rsc_is_bundled(rsc)) {
- GList *gIter = NULL;
-
- for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = (pe_resource_t *) gIter->data;
- time_t filler_last_failure = 0;
-
- failcount += pe_get_failcount(node, filler, &filler_last_failure,
- flags, xml_op);
-
- if (last_failure && filler_last_failure > *last_failure) {
- *last_failure = filler_last_failure;
- }
- }
-
- if (failcount > 0) {
- crm_info("Container %s and the resources within it "
- "have failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ g_list_foreach(rsc->fillers, update_failcount_for_filler, &fc_data);
+ if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc,
+ "Container %s and the resources within it "
+ "have failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- } else if (failcount > 0) {
- crm_info("%s has failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ } else if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc, "%s has failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- return failcount;
+ if (last_failure != NULL) {
+ if ((fc_data.failcount > 0) && (fc_data.last_failure > 0)) {
+ *last_failure = fc_data.last_failure;
+ } else {
+ *last_failure = 0;
+ }
+ }
+ return fc_data.failcount;
}
/*!
* \brief Schedule a controller operation to clear a fail count
*
- * \param[in,out] rsc Resource with failure
- * \param[in] node Node failure occurred on
- * \param[in] reason Readable description why needed (for logging)
- * \param[in,out] data_set Working set for cluster
+ * \param[in,out] rsc Resource with failure
+ * \param[in] node Node failure occurred on
+ * \param[in] reason Readable description why needed (for logging)
+ * \param[in,out] scheduler Scheduler data
*
* \return Scheduled action
*/
-pe_action_t *
-pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
- const char *reason, pe_working_set_t *data_set)
+pcmk_action_t *
+pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *reason, pcmk_scheduler_t *scheduler)
{
char *key = NULL;
- pe_action_t *clear = NULL;
+ pcmk_action_t *clear = NULL;
- CRM_CHECK(rsc && node && reason && data_set, return NULL);
+ CRM_CHECK(rsc && node && reason && scheduler, return NULL);
- key = pcmk__op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
- clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE,
- data_set);
+ key = pcmk__op_key(rsc->id, PCMK_ACTION_CLEAR_FAILCOUNT, 0);
+ clear = custom_action(rsc, key, PCMK_ACTION_CLEAR_FAILCOUNT, node, FALSE,
+ scheduler);
add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
rsc->id, pe__node_name(node), reason, clear->uuid);
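(Aside, before the group.c diff: the pe_get_failcount() rewrite above trades an open-coded GHashTableIter loop for g_hash_table_foreach() with an accumulator struct passed as user_data. A minimal standalone sketch of that callback pattern follows; the struct, attribute names, and values are illustrative, not Pacemaker's. Compile against GLib, e.g. gcc sketch.c $(pkg-config --cflags --libs glib-2.0).)

#include <glib.h>
#include <stdio.h>
#include <stdlib.h>

// Accumulator threaded through g_hash_table_foreach() as user_data
struct count_data {
    const char *prefix; // attribute-name prefix to match
    int total;          // running sum of matching values
};

static void
add_if_match(gpointer key, gpointer value, gpointer user_data)
{
    struct count_data *cd = user_data;

    if (g_str_has_prefix((const char *) key, cd->prefix)) {
        cd->total += atoi((const char *) value);
    }
}

int
main(void)
{
    GHashTable *attrs = g_hash_table_new(g_str_hash, g_str_equal);
    struct count_data cd = { .prefix = "fail-count-", .total = 0 };

    g_hash_table_insert(attrs, (gpointer) "fail-count-rsc1#start_0", (gpointer) "2");
    g_hash_table_insert(attrs, (gpointer) "last-failure-rsc1#start_0", (gpointer) "1700000000");
    g_hash_table_insert(attrs, (gpointer) "fail-count-rsc1#monitor_10000", (gpointer) "1");

    g_hash_table_foreach(attrs, add_if_match, &cd);
    printf("total failures: %d\n", cd.total); // prints 3
    g_hash_table_destroy(attrs);
    return 0;
}

One practical upside of this shape, visible in the diff itself: the regexes are compiled once and stored in the accumulator, so each callback invocation only pays for regexec(), keeping the per-attribute work cheap.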
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index d54b01a..dad610c 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -21,8 +21,8 @@
#include <pe_status_private.h>
typedef struct group_variant_data_s {
- pe_resource_t *last_child; // Last group member
- uint32_t flags; // Group of enum pe__group_flags
+ pcmk_resource_t *last_child; // Last group member
+ uint32_t flags; // Group of enum pcmk__group_flags
} group_variant_data_t;
/*!
@@ -33,11 +33,11 @@ typedef struct group_variant_data_s {
*
* \return Last member of \p group if any, otherwise NULL
*/
-pe_resource_t *
-pe__last_group_member(const pe_resource_t *group)
+pcmk_resource_t *
+pe__last_group_member(const pcmk_resource_t *group)
{
if (group != NULL) {
- CRM_CHECK((group->variant == pe_group)
+ CRM_CHECK((group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return NULL);
return ((group_variant_data_t *) group->variant_opaque)->last_child;
}
@@ -54,11 +54,11 @@ pe__last_group_member(const pe_resource_t *group)
* \return true if all \p flags are set for \p group, otherwise false
*/
bool
-pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
+pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags)
{
group_variant_data_t *group_data = NULL;
- CRM_CHECK((group != NULL) && (group->variant == pe_group)
+ CRM_CHECK((group != NULL) && (group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return false);
group_data = (group_variant_data_t *) group->variant_opaque;
return pcmk_all_flags_set(group_data->flags, flags);
@@ -74,7 +74,7 @@ pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
* \param[in] wo_bit "Warn once" flag to use for deprecation warning
*/
static void
-set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
+set_group_flag(pcmk_resource_t *group, const char *option, uint32_t flag,
uint32_t wo_bit)
{
const char *value_s = NULL;
@@ -97,12 +97,12 @@ set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
}
static int
-inactive_resources(pe_resource_t *rsc)
+inactive_resources(pcmk_resource_t *rsc)
{
int retval = 0;
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (!child_rsc->fns->active(child_rsc, TRUE)) {
retval++;
@@ -113,7 +113,7 @@ inactive_resources(pe_resource_t *rsc)
}
static void
-group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+group_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
int n_inactive, bool show_inactive, const char *desc)
{
GString *attrs = NULL;
@@ -128,10 +128,10 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -150,8 +150,8 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
static bool
-skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
- GList *only_rsc, uint32_t show_opts)
+skip_child_rsc(pcmk_resource_t *rsc, pcmk_resource_t *child,
+ gboolean parent_passes, GList *only_rsc, uint32_t show_opts)
{
bool star_list = pcmk__list_of_1(only_rsc) &&
pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
@@ -177,7 +177,7 @@ skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
}
gboolean
-group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
@@ -191,9 +191,10 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
rsc->variant_opaque = group_data;
// @COMPAT These are deprecated since 2.1.5
- set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
- pe_wo_group_order);
- set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
+ set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pcmk__group_ordered,
+ pcmk__wo_group_order);
+ set_group_flag(rsc, "collocated", pcmk__group_colocated,
+ pcmk__wo_group_coloc);
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
@@ -202,11 +203,11 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
if (pcmk__str_eq((const char *)xml_native_rsc->name,
XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
continue;
}
@@ -232,14 +233,14 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-group_active(pe_resource_t * rsc, gboolean all)
+group_active(pcmk_resource_t *rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
@@ -261,7 +262,7 @@ group_active(pe_resource_t * rsc, gboolean all)
* \deprecated This function will be removed in a future release
*/
static void
-group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+group_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GList *gIter = rsc->children;
@@ -272,7 +273,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -286,7 +287,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
void
-group_print(pe_resource_t *rsc, const char *pre_text, long options,
+group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = NULL;
@@ -317,7 +318,7 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
} else {
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("<li>\n");
@@ -335,12 +336,13 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -359,7 +361,7 @@ pe__group_xml(pcmk__output_t *out, va_list args)
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -367,8 +369,8 @@ pe__group_xml(pcmk__output_t *out, va_list args)
if (rc == pcmk_rc_no_output) {
char *count = pcmk__itoa(g_list_length(gIter));
- const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
- const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
+ const char *maint_s = pe__rsc_bool_str(rsc, pcmk_rsc_maintenance);
+ const char *managed_s = pe__rsc_bool_str(rsc, pcmk_rsc_managed);
const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
@@ -393,12 +395,13 @@ pe__group_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -431,7 +434,7 @@ pe__group_default(pcmk__output_t *out, va_list args)
} else {
for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -450,14 +453,14 @@ pe__group_default(pcmk__output_t *out, va_list args)
}
void
-group_free(pe_resource_t * rsc)
+group_free(pcmk_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -471,13 +474,13 @@ group_free(pe_resource_t * rsc)
}
enum rsc_role_e
-group_resource_state(const pe_resource_t * rsc, gboolean current)
+group_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e group_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
@@ -490,7 +493,7 @@ group_resource_state(const pe_resource_t * rsc, gboolean current)
}
gboolean
-pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -508,7 +511,7 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child_rsc = iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
@@ -519,3 +522,18 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
return !passes;
}
+
+/*!
+ * \internal
+ * \brief Get maximum group resource instances per node
+ *
+ * \param[in] rsc Group resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__group_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
+ return 1U;
+}
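(Aside, before the native.c diff: the new pe__group_max_per_node() above is one of a family of per-variant helpers. A group can only ever have one instance active per node, which is why it returns a constant 1U, whereas a clone's answer would depend on its clone-node-max setting. Below is a standalone sketch of that per-variant dispatch idea, using illustrative stand-ins rather than Pacemaker's real method tables.)

#include <stdio.h>

enum variant { VARIANT_PRIMITIVE, VARIANT_GROUP, VARIANT_CLONE };

struct rsc;
typedef unsigned int (*max_per_node_fn)(const struct rsc *rsc);

struct rsc {
    enum variant variant;
    unsigned int clone_node_max; // meaningful for clones only
    max_per_node_fn max_per_node;
};

static unsigned int
group_max_per_node(const struct rsc *rsc)
{
    (void) rsc;
    return 1U; // a group is a single ordered chain of members per node
}

static unsigned int
clone_max_per_node(const struct rsc *rsc)
{
    return rsc->clone_node_max;
}

int
main(void)
{
    struct rsc group = { VARIANT_GROUP, 0, group_max_per_node };
    struct rsc clone = { VARIANT_CLONE, 2, clone_max_per_node };

    printf("group: %u, clone: %u\n",
           group.max_per_node(&group), clone.max_per_node(&clone));
    return 0;
}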
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 5e92ddc..48b1a6a 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -30,18 +30,19 @@
* \brief Check whether a resource is active on multiple nodes
*/
static bool
-is_multiply_active(const pe_resource_t *rsc)
+is_multiply_active(const pcmk_resource_t *rsc)
{
unsigned int count = 0;
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
-native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
+native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node,
+ gboolean failed)
{
int priority = 0;
@@ -49,7 +50,7 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
return;
}
- if (rsc->role == RSC_ROLE_PROMOTED) {
+ if (rsc->role == pcmk_role_promoted) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
@@ -60,9 +61,9 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
pe__node_name(node), node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
+ (rsc->role == pcmk_role_promoted)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
@@ -71,28 +72,29 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = gIter->data;
+ pcmk_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
"from guest node %s",
pe__node_name(a_node), a_node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
+ (rsc->role == pcmk_role_promoted)? " + 1" : "",
pe__node_name(node));
}
}
}
void
-native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
+native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler, gboolean failed)
{
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
@@ -101,25 +103,27 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
}
- if (rsc->variant == pe_native && node->details->maintenance) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_maintenance);
+ if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && node->details->maintenance) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_maintenance);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe_resource_t *p = rsc->parent;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ pcmk_resource_t *p = rsc->parent;
pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
- resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
+ resource_location(rsc, node, INFINITY, "not_managed_default",
+ scheduler);
while(p && node->details->online) {
/* add without the additional location constraint */
@@ -131,43 +135,46 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
- case recovery_stop_only:
+ case pcmk_multiply_active_stop:
{
GHashTableIter gIter;
- pe_node_t *local_node = NULL;
+ pcmk_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
- rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
+ rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -INFINITY;
}
}
break;
- case recovery_block:
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ case pcmk_multiply_active_block:
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
/* If the resource belongs to a group or bundle configured with
* multiple-active=block, block the entire entity.
*/
if (rsc->parent
- && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
- && rsc->parent->recovery_type == recovery_block) {
+ && ((rsc->parent->variant == pcmk_rsc_variant_group)
+ || (rsc->parent->variant == pcmk_rsc_variant_bundle))
+ && (rsc->parent->recovery_type == pcmk_multiply_active_block)) {
GList *gIter = rsc->parent->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = gIter->data;
- pe__clear_resource_flags(child, pe_rsc_managed);
- pe__set_resource_flags(child, pe_rsc_block);
+ pe__clear_resource_flags(child, pcmk_rsc_managed);
+ pe__set_resource_flags(child, pcmk_rsc_blocked);
}
}
break;
- default: // recovery_stop_start, recovery_stop_unexpected
+
+ // pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
+ default:
/* The scheduler will do the right thing because the relevant
* variables and flags are set when unpacking the history.
*/
@@ -183,22 +190,22 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
if (rsc->parent != NULL) {
- native_add_running(rsc->parent, node, data_set, FALSE);
+ native_add_running(rsc->parent, node, scheduler, FALSE);
}
}
static void
-recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
+recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data)
{
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
}
gboolean
-native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *parent = uber_parent(rsc);
+ pcmk_resource_t *parent = uber_parent(rsc);
const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
@@ -206,14 +213,15 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
- && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && pe_rsc_is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
- pe__force_anon(standard, parent, rsc->id, data_set);
+ pe__force_anon(standard, parent, rsc->id, scheduler);
/* Clear globally-unique on the parent and all its descendants unpacked
* so far (clearing the parent should make any future children unpacking
@@ -224,7 +232,7 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
recursive_clear_unique(rsc, NULL);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
- && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
+ && pcmk_is_set(parent->flags, pcmk_rsc_promotable)) {
pe_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
@@ -235,42 +243,44 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
static bool
-rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
+rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags)
{
pe_rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, pe__node_name(node));
- if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->running_on != NULL)) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
- pe_node_t *loc = (pe_node_t *) iter->data;
+ pcmk_node_t *loc = (pcmk_node_t *) iter->data;
if (loc->details == node->details) {
return true;
}
}
- } else if (pcmk_is_set(flags, pe_find_inactive)
+ } else if (pcmk_is_set(flags, pe_find_inactive) // @COMPAT deprecated
&& (rsc->running_on == NULL)) {
return true;
- } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
+ } else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->allocated_to != NULL)
&& (rsc->allocated_to->details == node->details)) {
return true;
}
return false;
}
-pe_resource_t *
-native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
- int flags)
+pcmk_resource_t *
+native_find_rsc(pcmk_resource_t *rsc, const char *id,
+ const pcmk_node_t *on_node, int flags)
{
bool match = false;
- pe_resource_t *result = NULL;
+ pcmk_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
- if (flags & pe_find_clone) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) {
const char *rid = ID(rsc->xml);
if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
@@ -283,13 +293,13 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
} else if (!strcmp(id, rsc->id)) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_renamed)
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_history)
&& rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_any)
- || (pcmk_is_set(flags, pe_find_anon)
- && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_basename)
+ || (pcmk_is_set(flags, pcmk_rsc_match_anon_basename)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
@@ -304,7 +314,7 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
result = rsc->fns->find_rsc(child, id, on_node, flags);
if (result) {
@@ -316,8 +326,8 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
// create is ignored
char *
-native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
- pe_working_set_t * data_set)
+native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
+ const char *name, pcmk_scheduler_t *scheduler)
{
char *value_copy = NULL;
const char *value = NULL;
@@ -327,7 +337,7 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
@@ -338,16 +348,17 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
}
gboolean
-native_active(pe_resource_t * rsc, gboolean all)
+native_active(pcmk_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
if (a_node->details->unclean) {
pe_rsc_trace(rsc, "Resource %s: %s is unclean",
rsc->id, pe__node_name(a_node));
return TRUE;
- } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!a_node->details->online
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "Resource %s: %s is offline",
rsc->id, pe__node_name(a_node));
} else {
@@ -365,27 +376,32 @@ struct print_data_s {
};
static const char *
-native_pending_state(const pe_resource_t *rsc)
+native_pending_state(const pcmk_resource_t *rsc)
{
const char *pending_state = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
pending_state = "Stopping";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_casei)) {
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_casei)) {
/* Work might be done in here. */
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
+ pcmk__str_casei)) {
pending_state = "Promoting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
+ pcmk__str_casei)) {
pending_state = "Demoting";
}
@@ -393,11 +409,11 @@ native_pending_state(const pe_resource_t *rsc)
}
static const char *
-native_pending_task(const pe_resource_t *rsc)
+native_pending_task(const pcmk_resource_t *rsc)
{
const char *pending_task = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
@@ -415,21 +431,21 @@ native_pending_task(const pe_resource_t *rsc)
}
static enum rsc_role_e
-native_displayable_role(const pe_resource_t *rsc)
+native_displayable_role(const pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
- if ((role == RSC_ROLE_STARTED)
+ if ((role == pcmk_role_started)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
+ pcmk_rsc_promotable)) {
- role = RSC_ROLE_UNPROMOTED;
+ role = pcmk_role_unpromoted;
}
return role;
}
static const char *
-native_displayable_state(const pe_resource_t *rsc, bool print_pending)
+native_displayable_state(const pcmk_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
@@ -447,7 +463,7 @@ native_displayable_state(const pe_resource_t *rsc, bool print_pending)
* \deprecated This function will be removed in a future release
*/
static void
-native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+native_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -471,12 +487,14 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("target_role=\"%s\" ", target_role);
}
status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
- status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
- status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_removed));
+ status_print("blocked=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_blocked));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
if (options & pe_print_pending) {
@@ -496,7 +514,7 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
status_print("%s <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
"cached=\"%s\"/>\n",
@@ -542,8 +560,8 @@ add_output_node(GString *s, const char *node, bool have_nodes)
* \note Caller must free the result with g_free().
*/
gchar *
-pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
- const pe_node_t *node, uint32_t show_opts,
+pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
+ const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -552,7 +570,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
GString *outstr = NULL;
bool have_flags = false;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
return NULL;
}
@@ -580,14 +598,14 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
pcmk__s(provider, ""), ":", kind, "):\t", NULL);
// State on node
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
g_string_append(outstr, " ORPHANED");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
g_string_append(outstr, " FAILED");
- if (role > RSC_ROLE_UNPROMOTED) {
+ if (role > pcmk_role_unpromoted) {
pcmk__add_word(&outstr, 0, role2text(role));
}
} else {
@@ -600,7 +618,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
// Failed probe operation
- if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) {
+ if (native_displayable_role(rsc) == pcmk_role_stopped) {
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
if (probe_op != NULL) {
int rc;
@@ -632,30 +650,31 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
* Started, as it is the default anyways, and doesn't prevent the
* resource from becoming promoted).
*/
- if (target_role_e == RSC_ROLE_STOPPED) {
+ if (target_role_e == pcmk_role_stopped) {
have_flags = add_output_flag(outstr, "disabled", have_flags);
} else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)
- && target_role_e == RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)
+ && (target_role_e == pcmk_role_unpromoted)) {
have_flags = add_output_flag(outstr, "target-role:", have_flags);
g_string_append(outstr, target_role);
}
}
// Blocked or maintenance implies unmanaged
- if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_any_flags_set(rsc->flags,
+ pcmk_rsc_blocked|pcmk_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
have_flags = add_output_flag(outstr, "maintenance", have_flags);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
@@ -682,7 +701,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *n = (pe_node_t *) iter->data;
+ pcmk_node_t *n = (pcmk_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
@@ -695,8 +714,8 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
int
-pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -705,7 +724,7 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
xmlNodePtr list_node = NULL;
const char *cl = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
CRM_ASSERT(kind != NULL);
if (rsc->meta) {
@@ -720,19 +739,20 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
cl = "rsc-managed";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
cl = "rsc-failed";
- } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && (rsc->running_on == NULL)) {
cl = "rsc-failed";
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = "rsc-multiple";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
cl = "rsc-failure-ignored";
} else {
@@ -752,13 +772,13 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
}
int
-pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
@@ -788,12 +808,12 @@ pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
* \deprecated This function will be removed in a future release
*/
void
-common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
- const pe_node_t *node, long options, void *print_data)
+common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
+ const pcmk_node_t *node, long options, void *print_data)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
@@ -818,10 +838,10 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
if (options & pe_print_html) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
status_print("<font color=\"yellow\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
status_print("<font color=\"red\">");
} else if (rsc->running_on == NULL) {
@@ -830,7 +850,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
} else if (pcmk__list_of_multiple(rsc->running_on)) {
status_print("<font color=\"orange\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
status_print("<font color=\"yellow\">");
} else {
@@ -863,7 +883,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = (pe_node_t *) gIter->data;
+ pcmk_node_t *n = (pcmk_node_t *) gIter->data;
counter++;
@@ -908,12 +928,12 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
* \deprecated This function will be removed in a future release
*/
void
-native_print(pe_resource_t *rsc, const char *pre_text, long options,
+native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
@@ -929,12 +949,13 @@ native_print(pe_resource_t *rsc, const char *pre_text, long options,
common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -956,7 +977,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -979,12 +1000,12 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
"role", rsc_state,
"target_role", target_role,
"active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
- "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
- "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "orphaned", pe__rsc_bool_str(rsc, pcmk_rsc_removed),
+ "blocked", pe__rsc_bool_str(rsc, pcmk_rsc_blocked),
+ "maintenance", pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored", pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"nodes_running_on", nodes_running_on,
"pending", (print_pending? native_pending_task(rsc) : NULL),
"locked_to", lock_node_name,
@@ -997,7 +1018,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
"name", node->details->uname,
@@ -1011,22 +1032,23 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
@@ -1035,18 +1057,19 @@ pe__resource_html(pcmk__output_t *out, va_list args)
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -1060,14 +1083,14 @@ pe__resource_text(pcmk__output_t *out, va_list args)
}
void
-native_free(pe_resource_t * rsc)
+native_free(pcmk_resource_t * rsc)
{
pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
-native_resource_state(const pe_resource_t * rsc, gboolean current)
+native_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
@@ -1089,17 +1112,18 @@ native_resource_state(const pe_resource_t * rsc, gboolean current)
*
* \return If list contains only one node, that node, or NULL otherwise
*/
-pe_node_t *
-native_location(const pe_resource_t *rsc, GList **list, int current)
+pcmk_node_t *
+native_location(const pcmk_resource_t *rsc, GList **list, int current)
{
- pe_node_t *one = NULL;
+ // @COMPAT: Accept a pcmk__rsc_node argument instead of int current
+ pcmk_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
child->fns->location(child, &result, current);
}
@@ -1126,7 +1150,7 @@ native_location(const pe_resource_t *rsc, GList **list, int current)
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
@@ -1144,7 +1168,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -1155,7 +1179,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
int *rsc_counter = NULL;
int *active_counter = NULL;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
continue;
}
@@ -1185,11 +1209,11 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE &&
- pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
continue;
}
@@ -1398,17 +1422,32 @@ pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
}
gboolean
-pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
return FALSE;
} else if (check_parent && rsc->parent) {
- const pe_resource_t *up = pe__const_top_resource(rsc, true);
+ const pcmk_resource_t *up = pe__const_top_resource(rsc, true);
return up->fns->is_filtered(up, only_rsc, FALSE);
}
return TRUE;
}
+
+/*!
+ * \internal
+ * \brief Get maximum primitive resource instances per node
+ *
+ * \param[in] rsc Primitive resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__primitive_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
+ return 1U;
+}
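
A primitive can never run more than one instance on a node, and this new helper lets callers ask that question uniformly across resource variants. A minimal caller sketch, assuming rsc has been unpacked (the dispatch wrapper and its fallback value are hypothetical, not part of the library):

    #include <crm/pengine/internal.h>

    /* Sketch: dispatch on the variant before calling, since
     * pe__primitive_max_per_node() asserts it was given a primitive */
    static unsigned int
    instance_cap(const pcmk_resource_t *rsc)
    {
        if (rsc->variant == pcmk_rsc_variant_primitive) {
            return pe__primitive_max_per_node(rsc);    // always 1U
        }
        return 0U;    // hypothetical fallback for this sketch only
    }
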
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index ed7f0da..aaa6598 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,29 +14,30 @@
#include <crm/crm.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
+#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
-static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms);
+static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms);
static void
-add_singleton(pe_working_set_t *data_set, pe_action_t *action)
+add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
{
- if (data_set->singletons == NULL) {
- data_set->singletons = pcmk__strkey_table(NULL, NULL);
+ if (scheduler->singletons == NULL) {
+ scheduler->singletons = pcmk__strkey_table(NULL, NULL);
}
- g_hash_table_insert(data_set->singletons, action->uuid, action);
+ g_hash_table_insert(scheduler->singletons, action->uuid, action);
}
-static pe_action_t *
-lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
+static pcmk_action_t *
+lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
{
- if (data_set->singletons == NULL) {
+ if (scheduler->singletons == NULL) {
return NULL;
}
- return g_hash_table_lookup(data_set->singletons, action_uuid);
+ return g_hash_table_lookup(scheduler->singletons, action_uuid);
}
/*!
@@ -46,21 +47,21 @@ lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Existing action that matches arguments (or NULL if none)
*/
-static pe_action_t *
-find_existing_action(const char *key, const pe_resource_t *rsc,
- const pe_node_t *node, const pe_working_set_t *data_set)
+static pcmk_action_t *
+find_existing_action(const char *key, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
{
GList *matches = NULL;
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- /* When rsc is NULL, it would be quicker to check data_set->singletons,
- * but checking all data_set->actions takes the node into account.
+ /* When rsc is NULL, it would be quicker to check scheduler->singletons,
+ * but checking all scheduler->actions takes the node into account.
*/
- matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
+ matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
@@ -72,79 +73,78 @@ find_existing_action(const char *key, const pe_resource_t *rsc,
return action;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration exactly matching an action name and interval
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
static xmlNode *
-find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
- gboolean include_disabled)
+find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- guint interval_ms = 0;
- gboolean do_retry = TRUE;
- char *local_key = NULL;
- const char *name = NULL;
- const char *interval_spec = NULL;
- char *match_key = NULL;
- xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- retry:
- for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
- operation = pcmk__xe_next(operation)) {
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
+ bool enabled = false;
+ const char *config_name = NULL;
+ const char *interval_spec = NULL;
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
-
- interval_ms = crm_parse_interval_spec(interval_spec);
- match_key = pcmk__op_key(rsc->id, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
-
- if (rsc->clone_name) {
- match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
- }
-
- if (op != NULL) {
- free(local_key);
- return op;
- }
+ // @TODO This does not consider rules, defaults, etc.
+ if (!include_disabled
+ && (pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
}
- }
-
- free(local_key);
- if (do_retry == FALSE) {
- return NULL;
- }
- do_retry = FALSE;
- if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
- local_key = pcmk__op_key(rsc->id, "migrate", 0);
- key = local_key;
- goto retry;
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) != interval_ms) {
+ continue;
+ }
- } else if (strstr(key, "_notify_")) {
- local_key = pcmk__op_key(rsc->id, "notify", 0);
- key = local_key;
- goto retry;
+ config_name = crm_element_value(operation, "name");
+ if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
+ return operation;
+ }
}
-
return NULL;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration of a resource action
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
xmlNode *
-find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
+pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- return find_rsc_op_entry_helper(rsc, key, FALSE);
+ xmlNode *action_config = NULL;
+
+ // Try requested action first
+ action_config = find_exact_action_config(rsc, action_name, interval_ms,
+ include_disabled);
+
+ // For migrate_to and migrate_from actions, retry with "migrate"
+ // @TODO This should be either documented or deprecated
+ if ((action_config == NULL)
+ && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ action_config = find_exact_action_config(rsc, "migrate", 0,
+ include_disabled);
+ }
+
+ return action_config;
}
/*!
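
The rewritten lookup matches on action name plus interval rather than a preassembled operation key, with one compatibility wrinkle kept from the old retry loop: migrate_to/migrate_from fall back to an <op> named "migrate" with interval 0. A minimal sketch of a lookup, assuming rsc and its ops_xml are already unpacked:

    // Sketch: find the enabled <op> configuring a 10-second monitor
    xmlNode *op_xml = pcmk__find_action_config(rsc, PCMK_ACTION_MONITOR,
                                               10000 /* 10s in ms */, false);
    if (op_xml == NULL) {
        /* No matching enabled <op> entry; callers fall back to defaults
         * (see pcmk__unpack_action_meta() later in this file) */
    }
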
@@ -156,98 +156,106 @@ find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
-static pe_action_t *
-new_action(char *key, const char *task, pe_resource_t *rsc,
- const pe_node_t *node, bool optional, bool for_graph,
- pe_working_set_t *data_set)
+static pcmk_action_t *
+new_action(char *key, const char *task, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = calloc(1, sizeof(pe_action_t));
+ pcmk_action_t *action = calloc(1, sizeof(pcmk_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
- action->extra = pcmk__strkey_table(free, free);
- action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
- if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
- pe__set_action_flags(action, pe_action_dc);
+ pe__set_action_flags(action, pcmk_action_on_dc);
}
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
if (optional) {
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
} else {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
- if (rsc != NULL) {
+ if (rsc == NULL) {
+ action->meta = pcmk__strkey_table(free, free);
+ } else {
guint interval_ms = 0;
- action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set,
- interval_ms);
+ action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
+ true);
+
+ /* If the given key is for one of the many notification pseudo-actions
+ * (pre_notify_promote, etc.), the actual action name is "notify"
+ */
+ if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
+ action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
+ 0, true);
+ }
+
+ unpack_operation(action, action->op_entry, interval_ms);
}
- if (for_graph) {
- pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
- (optional? "optional" : "required"),
- data_set->action_id, key, task,
- ((rsc == NULL)? "no resource" : rsc->id),
- pe__node_name(node));
- action->id = data_set->action_id++;
+ pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
+ (optional? "optional" : "required"),
+ scheduler->action_id, key, task,
+ ((rsc == NULL)? "no resource" : rsc->id),
+ pe__node_name(node));
+ action->id = scheduler->action_id++;
- data_set->actions = g_list_prepend(data_set->actions, action);
- if (rsc == NULL) {
- add_singleton(data_set, action);
- } else {
- rsc->actions = g_list_prepend(rsc->actions, action);
- }
+ scheduler->actions = g_list_prepend(scheduler->actions, action);
+ if (rsc == NULL) {
+ add_singleton(scheduler, action);
+ } else {
+ rsc->actions = g_list_prepend(rsc->actions, action);
}
return action;
}
/*!
* \internal
- * \brief Evaluate node attribute values for an action
+ * \brief Unpack a resource's action-specific instance parameters
*
- * \param[in,out] action Action to unpack attributes for
- * \param[in,out] data_set Cluster working set
+ * \param[in] action_xml XML of action's configuration in CIB (if any)
+ * \param[in,out] node_attrs Table of node attributes (for rule evaluation)
+ * \param[in,out] scheduler Scheduler data (for rule evaluation)
+ *
+ * \return Newly allocated hash table of action-specific instance parameters
*/
-static void
-unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
+GHashTable *
+pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
+ GHashTable *node_attrs,
+ pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
- && (action->op_entry != NULL)) {
-
- pe_rule_eval_data_t rule_data = {
- .node_hash = action->node->details->attrs,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
- .match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
- };
-
- pe__set_action_flags(action, pe_action_have_node_attrs);
- pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
- &rule_data, action->extra, NULL,
- FALSE, data_set);
- }
+ GHashTable *params = pcmk__strkey_table(free, free);
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_attrs,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe__unpack_dataset_nvpairs(action_xml, XML_TAG_ATTR_SETS,
+ &rule_data, params, NULL,
+ FALSE, scheduler);
+ return params;
}
/*!
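
What used to happen lazily inside unpack_action_node_attributes() is now an explicit helper that returns a freshly allocated table the caller owns. A minimal usage sketch, assuming action and scheduler are populated (the parameter name looked up is purely illustrative):

    // Sketch: evaluate an action's instance attributes against its node
    GHashTable *params =
        pcmk__unpack_action_rsc_params(action->op_entry,
                                       action->node->details->attrs,
                                       scheduler);

    // e.g. a monitor's OCF_CHECK_LEVEL instance attribute, if configured
    const char *check_level = g_hash_table_lookup(params, "OCF_CHECK_LEVEL");

    g_hash_table_destroy(params);    // the caller owns the returned table
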
@@ -258,46 +266,46 @@ unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
* \param[in] optional Requested optional status
*/
static void
-update_action_optional(pe_action_t *action, gboolean optional)
+update_action_optional(pcmk_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
}
static enum pe_quorum_policy
-effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
+effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- enum pe_quorum_policy policy = data_set->no_quorum_policy;
+ enum pe_quorum_policy policy = scheduler->no_quorum_policy;
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- policy = no_quorum_ignore;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ policy = pcmk_no_quorum_ignore;
- } else if (data_set->no_quorum_policy == no_quorum_demote) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
- case RSC_ROLE_PROMOTED:
- case RSC_ROLE_UNPROMOTED:
- if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
+ case pcmk_role_promoted:
+ case pcmk_role_unpromoted:
+ if (rsc->next_role > pcmk_role_unpromoted) {
+ pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
- policy = no_quorum_ignore;
+ policy = pcmk_no_quorum_ignore;
break;
default:
- policy = no_quorum_stop;
+ policy = pcmk_no_quorum_stop;
break;
}
}
@@ -309,50 +317,47 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
-update_resource_action_runnable(pe_action_t *action, bool for_graph,
- pe_working_set_t *data_set)
+update_resource_action_runnable(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
- "%s on %s is unrunnable (node is offline)",
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && for_graph
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
- pe_fence_node(data_set, action->node, "stop is unrunnable", false);
+ pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
}
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& action->node->details->pending) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING,
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
- } else if (action->needs == rsc_req_nothing) {
+ } else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
- && !pe_can_fence(data_set, action->node)) {
+ && !pe_can_fence(scheduler, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
@@ -361,37 +366,37 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
} else {
- switch (effective_quorum_policy(action->rsc, data_set)) {
- case no_quorum_stop:
+ switch (effective_quorum_policy(action->rsc, scheduler)) {
+ case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
- case no_quorum_freeze:
+ case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
break;
}
}
@@ -405,19 +410,20 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
* \param[in] action New action
*/
static void
-update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
+update_resource_flags_for_action(pcmk_resource_t *rsc,
+ const pcmk_action_t *action)
{
- /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
- * within Pacemaker, and should be deprecated and eventually removed
+ /* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
+ * within Pacemaker, and will eventually be removed
*/
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_stopping);
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_stopping);
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
- pe__set_resource_flags(rsc, pe_rsc_starting);
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
- pe__clear_resource_flags(rsc, pe_rsc_starting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
@@ -428,80 +434,121 @@ valid_stop_on_fail(const char *value)
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
-static const char *
-unpack_operation_on_fail(pe_action_t * action)
+/*!
+ * \internal
+ * \brief Validate (and possibly reset) a resource action's on-fail meta-attribute
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] action_config Action configuration XML from CIB (if any)
+ * \param[in,out] meta Table of action meta-attributes
+ */
+static void
+validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
- const char *on_fail = NULL;
const char *interval_spec = NULL;
- const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ char *key = NULL;
+ char *new_value = NULL;
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ // Stop actions can only use certain on-fail values
+ if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
- "allowed for stop", action->rsc->id, value);
- return NULL;
-
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
- // demote on_fail defaults to monitor value for promoted role if present
- xmlNode *operation = NULL;
+ "allowed for stop", rsc->id, value);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
+ }
- CRM_CHECK(action->rsc != NULL, return NULL);
+ /* Demote actions default on-fail to the on-fail value for the first
+ * recurring monitor for the promoted role (if any).
+ */
+ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && (value == NULL)) {
- for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
- (operation != NULL) && (value == NULL);
- operation = pcmk__xe_next(operation)) {
+ /* @TODO This does not consider promote options set in a meta-attribute
+ * block (which may have rules that need to be evaluated) rather than
+ * XML properties.
+ */
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
+ const char *promote_on_fail = NULL;
- if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ /* We only care about an explicit on-fail setting here (if the promote
+ * action uses the default, the demote action can too)
+ */
+ promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ if (promote_on_fail == NULL) {
continue;
}
+
+ // We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
- on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!on_fail) {
- continue;
- } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
- } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S,
- NULL)) {
+ }
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
- } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ }
+
+ // We only care about enabled monitors
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
continue;
- } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ }
+
+ // Demote actions can't default to on-fail="demote"
+ if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
- value = on_fail;
+ // Use value from first applicable promote action found
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup(promote_on_fail);
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
}
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
- value = "ignore";
+ return;
+ }
- } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- name = crm_element_value(action->op_entry, "name");
- role = crm_element_value(action->op_entry, "role");
- interval_spec = crm_element_value(action->op_entry,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
+ && !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup("ignore");
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return;
+ }
+
+ // on-fail="demote" is allowed only for certain actions
+ if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action_config, "name");
+ role = crm_element_value(action_config, "role");
+ interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
- && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)
+ if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
- "allowed for it", action->rsc->id, name);
- return NULL;
+ "allowed for it", rsc->id, name);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
}
}
-
- return value;
}
static int
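
The constraints validate_on_fail() enforces are easier to see against concrete configuration. A hypothetical operations block (resource, IDs, and values are illustrative only):

    <operations>
      <!-- on-fail="demote" is allowed here: a recurring monitor for the
           promoted role -->
      <op id="db-monitor-10s" name="monitor" interval="10s"
          role="Promoted" on-fail="demote"/>
      <!-- allowed for stop: anything except standby, demote, or stop -->
      <op id="db-stop" name="stop" interval="0s" on-fail="block"/>
      <!-- with no explicit on-fail, demote would inherit the promoted-role
           monitor's on-fail, except that an inherited "demote" is skipped -->
      <op id="db-demote" name="demote" interval="0s"/>
    </operations>
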
@@ -510,7 +557,7 @@ unpack_timeout(const char *value)
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
@@ -579,346 +626,475 @@ unpack_start_delay(const char *value, GHashTable *meta)
return start_delay;
}
+/*!
+ * \internal
+ * \brief Find a resource's most frequent recurring monitor
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Operation XML configured for most frequent recurring monitor for
+ * \p rsc (if any)
+ */
static xmlNode *
-find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
+most_frequent_monitor(const pcmk_resource_t *rsc)
{
- guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
- const char *name = NULL;
- const char *interval_spec = NULL;
xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- for (operation = pcmk__xe_first_child(rsc->ops_xml);
- operation != NULL;
- operation = pcmk__xe_next(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
-
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
+ bool enabled = false;
+ guint interval_ms = 0;
+ const char *interval_spec = crm_element_value(operation,
+ XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
- continue;
- }
+ // We only care about enabled recurring monitors
+ if (!pcmk__str_eq(crm_element_value(operation, "name"),
+ PCMK_ACTION_MONITOR, pcmk__str_none)) {
+ continue;
+ }
+ interval_ms = crm_parse_interval_spec(interval_spec);
+ if (interval_ms == 0) {
+ continue;
+ }
- interval_ms = crm_parse_interval_spec(interval_spec);
+ // @TODO This does not account for rules, defaults, etc.
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
+ }
- if (interval_ms && (interval_ms < min_interval_ms)) {
- min_interval_ms = interval_ms;
- op = operation;
- }
+ if (interval_ms < min_interval_ms) {
+ min_interval_ms = interval_ms;
+ op = operation;
}
}
-
return op;
}
/*!
- * \brief Unpack operation XML into an action structure
+ * \internal
+ * \brief Unpack action meta-attributes
*
- * Unpack an operation's meta-attributes (normalizing the interval, timeout,
- * and start delay values as integer milliseconds), requirements, and
- * failure policy.
+ * \param[in,out] rsc Resource that action is for
+ * \param[in] node Node that action is on
+ * \param[in] action_name Action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] action_config Action XML configuration from CIB (if any)
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in,out] data_set Cluster state
- * \param[in] interval_ms How frequently to perform the operation
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds) from its CIB XML
+ * configuration (including defaults).
+ *
+ * \return Newly allocated hash table with normalized action meta-attributes
*/
-static void
-unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms)
+GHashTable *
+pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config)
{
- int timeout_ms = 0;
- const char *value = NULL;
- bool is_probe = false;
+ GHashTable *meta = NULL;
+ char *name = NULL;
+ char *value = NULL;
+ const char *timeout_spec = NULL;
+ const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
- .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
- .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
- .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
- .op_name = action->task,
- .interval = interval_ms
+ .op_name = action_name,
+ .interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .node_hash = (node == NULL)? NULL : node->details->attrs,
+ .role = pcmk_role_unknown,
+ .now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
- .op_data = &op_rule_data
+ .op_data = &op_rule_data,
};
- CRM_CHECK(action && action->rsc, return);
-
- is_probe = pcmk_is_probe(action->task, interval_ms);
+ meta = pcmk__strkey_table(free, free);
// Cluster-wide <op_defaults> <meta_attributes>
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, FALSE, data_set);
-
- // Determine probe default timeout differently
- if (is_probe) {
- xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
-
- if (min_interval_mon) {
- value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
- if (value) {
- crm_trace("\t%s: Setting default timeout to minimum-interval "
- "monitor's timeout '%s'", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
+ &rule_data, meta, NULL, FALSE, rsc->cluster);
+
+ // Derive default timeout for probes from recurring monitor timeouts
+ if (pcmk_is_probe(action_name, interval_ms)) {
+ xmlNode *min_interval_mon = most_frequent_monitor(rsc);
+
+ if (min_interval_mon != NULL) {
+ /* @TODO This does not consider timeouts set in meta_attributes
+ * blocks (which may also have rules that need to be evaluated).
+ */
+ timeout_spec = crm_element_value(min_interval_mon,
+ XML_ATTR_TIMEOUT);
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting default timeout for %s probe to "
+ "most frequent monitor's timeout '%s'",
+ rsc->id, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
}
- if (xml_obj) {
- xmlAttrPtr xIter = NULL;
-
+ if (action_config != NULL) {
// <op> <meta_attributes> take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE, data_set);
+ pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
+ meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
+ * (See below for the only exception, fence device start/probe timeout.)
*/
- for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
- const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_obj, prop_name);
+ for (xmlAttrPtr attr = action_config->properties;
+ attr != NULL; attr = attr->next) {
+ name = strdup((const char *) attr->name);
+ value = strdup(pcmk__xml_attr_value(attr));
- g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
- g_hash_table_remove(action->meta, "id");
+ g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
- g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
- crm_strdup_printf("%u", interval_ms));
+ name = strdup(XML_LRM_ATTR_INTERVAL);
+ CRM_ASSERT(name != NULL);
+ value = crm_strdup_printf("%u", interval_ms);
+ g_hash_table_insert(meta, name, value);
} else {
- g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
- }
-
- /*
- * Timeout order of precedence:
- * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
- * and task is start or a probe; pcmk_monitor_timeout works
- * by default for a recurring monitor)
- * 2. explicit op timeout on the primitive
- * 3. default op timeout
- * a. if probe, then min-interval monitor's timeout
- * b. else, in XML_CIB_TAG_OPCONFIG
- * 4. CRM_DEFAULT_OP_TIMEOUT_S
- *
- * #1 overrides general rule of <op> XML property having highest
- * precedence.
+ g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /* Timeout order of precedence (highest to lowest):
+ * 1. pcmk_monitor_timeout resource parameter (only for starts and probes
+ * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
+ * monitors via the executor instead)
+ * 2. timeout configured in <op> (with <op timeout> taking precedence over
+ * <op> <meta_attributes>)
+ * 3. timeout configured in <op_defaults> <meta_attributes>
+ * 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*/
+
+ // Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
- && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- || is_probe)) {
-
- GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+ && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
+ || pcmk_is_probe(action_name, interval_ms))) {
+
+ GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
+
+ timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting timeout for %s %s to "
+ "pcmk_monitor_timeout (%s)",
+ rsc->id, action_name, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
+ }
+ }
- value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ // Normalize timeout to positive milliseconds
+ name = strdup(XML_ATTR_TIMEOUT);
+ CRM_ASSERT(name != NULL);
+ timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
+
+ // Ensure on-fail has a valid value
+ validate_on_fail(rsc, action_name, action_config, meta);
+
+ // Normalize start-delay
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
+ if (str != NULL) {
+ unpack_start_delay(str, meta);
+ } else {
+ long long start_delay = 0;
- if (value) {
- crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
- "overriding default", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(str, action_config, interval_ms,
+ rsc->cluster->now, &start_delay)) {
+ name = strdup(XML_OP_ATTR_START_DELAY);
+ CRM_ASSERT(name != NULL);
+ g_hash_table_insert(meta, name,
+ crm_strdup_printf("%lld", start_delay));
}
}
+ return meta;
+}
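
With the extraction into pcmk__unpack_action_meta(), callers get a fully evaluated table in one call, with the timeout precedence described above already applied and interval/timeout/start-delay normalized. A minimal sketch, assuming rsc, node, and an op_xml lookup as shown earlier:

    // Sketch: unpack evaluated meta-attributes for a 10-second monitor
    GHashTable *meta = pcmk__unpack_action_meta(rsc, node,
                                                PCMK_ACTION_MONITOR,
                                                10000, op_xml);

    // Interval and timeout come back normalized as millisecond strings
    const char *timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
    const char *interval_spec = g_hash_table_lookup(meta,
                                                    XML_LRM_ATTR_INTERVAL);

    g_hash_table_destroy(meta);    // the caller owns the returned table
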
- // Normalize timeout to positive milliseconds
- value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
- timeout_ms = unpack_timeout(value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- pcmk__itoa(timeout_ms));
+/*!
+ * \internal
+ * \brief Determine an action's quorum and fencing dependency
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action being unpacked
+ *
+ * \return Quorum and fencing dependency appropriate to action
+ */
+enum rsc_start_requirement
+pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
+{
+ const char *value = NULL;
+ enum rsc_start_requirement requires = pcmk_requires_nothing;
+
+ CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
- if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
- action->needs = rsc_req_nothing;
+ if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
value = "nothing (not start or promote)";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
- action->needs = rsc_req_stonith;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
+ requires = pcmk_requires_fencing;
value = "fencing";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
- action->needs = rsc_req_quorum;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_quorum)) {
+ requires = pcmk_requires_quorum;
value = "quorum";
} else {
- action->needs = rsc_req_nothing;
value = "nothing";
}
- pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
+ pe_rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
+ return requires;
+}
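
Only start and promote actions can depend on fencing or quorum; every other action requires nothing. A minimal sketch of the mapping for a start, assuming rsc's flags were set during unpacking:

    // Sketch: what a start of rsc depends on
    switch (pcmk__action_requires(rsc, PCMK_ACTION_START)) {
        case pcmk_requires_fencing:    // pcmk_rsc_needs_fencing is set
        case pcmk_requires_quorum:     // pcmk_rsc_needs_quorum is set
        case pcmk_requires_nothing:    // neither flag is set
            break;
    }
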
- value = unpack_operation_on_fail(action);
+/*!
+ * \internal
+ * \brief Parse action failure response from a user-provided string
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] value User-provided configuration value for on-fail
+ *
+ * \return Action failure response parsed from \p value
+ */
+enum action_fail_response
+pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, const char *value)
+{
+ const char *desc = NULL;
+ bool needs_remote_reset = false;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
if (value == NULL) {
+ // Use default
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- action->on_fail = action_fail_block;
- g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
- value = "block"; // The above could destroy the original string
+ on_fail = pcmk_on_fail_block;
+ desc = "block";
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
- action->on_fail = action_fail_fence;
- value = "node fencing";
-
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "node fencing";
+ } else {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
- "operation '%s' to 'stop' because 'fence' is not "
- "valid when fencing is disabled", action->uuid);
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ "%s of %s to 'stop' because 'fence' is not "
+ "valid when fencing is disabled",
+ action_name, rsc->id);
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
- action->on_fail = action_fail_standby;
- value = "node standby";
+ on_fail = pcmk_on_fail_standby_node;
+ desc = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
- action->on_fail = action_fail_ignore;
- value = "ignore";
+ desc = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
- action->on_fail = action_fail_migrate;
- value = "force migration";
+ on_fail = pcmk_on_fail_ban;
+ desc = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate)";
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
- if (container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate)";
-
+ if (rsc->container == NULL) {
+ pe_rsc_debug(rsc,
+ "Using default " XML_OP_ATTR_ON_FAIL
+ " for %s of %s because it does not have a container",
+ action_name, rsc->id);
} else {
- value = NULL;
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate)";
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- action->on_fail = action_fail_demote;
- value = "demote instance";
+ on_fail = pcmk_on_fail_demote;
+ desc = "demote instance";
} else {
- pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
- value = NULL;
+ pcmk__config_err("Using default '" XML_OP_ATTR_ON_FAIL "' for "
+ "%s of %s because '%s' is not valid",
+ action_name, rsc->id, value);
}
- /* defaults */
- if (value == NULL && container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate) (default)";
+ /* Remote node connections are handled specially. Failures that result
+ * in dropping an active connection must result in fencing. The only
+ * failures that don't are probes and starts. The user can explicitly set
+ * on-fail="fence" to fence after start failures.
+ */
+ if (pe__resource_is_remote_conn(rsc)
+ && !pcmk_is_probe(action_name, interval_ms)
+ && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
+ needs_remote_reset = true;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ desc = NULL; // Force default for unmanaged connections
+ }
+ }
- /* For remote nodes, ensure that any failure that results in dropping an
- * active connection to the node results in fencing of the node.
- *
- * There are only two action failures that don't result in fencing.
- * 1. probes - probe failures are expected.
- * 2. start - a start failure indicates that an active connection does not already
- * exist. The user can set op on-fail=fence if they really want to fence start
- * failures. */
- } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
- && pe__resource_is_remote_conn(action->rsc, data_set)
- && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
- && (interval_ms == 0))
- && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
-
- if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop unmanaged remote node (enforcing default)";
+ if (desc != NULL) {
+ // Explicit value used, default not needed
- } else {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- value = "fence remote node (default)";
- } else {
- value = "recover remote node connection (default)";
- }
+ } else if (rsc->container != NULL) {
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate) (default)";
- if (action->rsc->remote_reconnect_ms) {
- action->fail_role = RSC_ROLE_STOPPED;
+ } else if (needs_remote_reset) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
+ desc = "fence remote node (default)";
+ } else {
+ desc = "recover remote node connection (default)";
}
- action->on_fail = action_fail_reset_remote;
+ on_fail = pcmk_on_fail_reset_remote;
+ } else {
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop unmanaged remote node (enforcing default)";
}
- } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- action->on_fail = action_fail_fence;
- value = "resource fence (default)";
-
+ } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "resource fence (default)";
} else {
- action->on_fail = action_fail_block;
- value = "resource block (default)";
+ on_fail = pcmk_on_fail_block;
+ desc = "resource block (default)";
}
- } else if (value == NULL) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate) (default)";
+ } else {
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate) (default)";
}
- pe_rsc_trace(action->rsc, "%s failure handling: %s",
- action->uuid, value);
+ pe_rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
+ pcmk__readable_interval(interval_ms), action_name,
+ rsc->id, desc);
+ return on_fail;
+}
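
When on-fail is not configured, the chosen default depends on the kind of resource and action involved, ending with a plain restart. A minimal sketch of the defaulting behavior, assuming rsc is populated:

    // Sketch: defaults chosen when on-fail is unset (value == NULL)
    enum action_fail_response on_fail =
        pcmk__parse_on_fail(rsc, PCMK_ACTION_MONITOR, 10000, NULL);

    /* Per the defaults above, on_fail would typically be:
     * pcmk_on_fail_restart            for an ordinary resource
     * pcmk_on_fail_restart_container  if rsc->container is set
     * pcmk_on_fail_reset_remote       for a managed remote connection
     */
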
- value = NULL;
- if (xml_obj != NULL) {
- value = g_hash_table_lookup(action->meta, "role_after_failure");
- if (value) {
- pe_warn_once(pe_wo_role_after,
- "Support for role_after_failure is deprecated and will be removed in a future release");
- }
+/*!
+ * \internal
+ * \brief Determine a resource's role after failure of an action
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] on_fail Failure handling for action
+ * \param[in] meta Unpacked action meta-attributes
+ *
+ * \return Resource role that results from failure of action
+ */
+enum rsc_role_e
+pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
+ enum action_fail_response on_fail, GHashTable *meta)
+{
+ const char *value = NULL;
+ enum rsc_role_e role = pcmk_role_unknown;
+
+ // Set default for role after failure specially in certain circumstances
+ switch (on_fail) {
+ case pcmk_on_fail_stop:
+ role = pcmk_role_stopped;
+ break;
+
+ case pcmk_on_fail_reset_remote:
+ if (rsc->remote_reconnect_ms != 0) {
+ role = pcmk_role_stopped;
+ }
+ break;
+
+ default:
+ break;
}
- if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
- action->fail_role = text2role(value);
+
+ // @COMPAT Check for explicitly configured role (deprecated)
+ value = g_hash_table_lookup(meta, "role_after_failure");
+ if (value != NULL) {
+ pe_warn_once(pcmk__wo_role_after,
+ "Support for role_after_failure is deprecated "
+ "and will be removed in a future release");
+ if (role == pcmk_role_unknown) {
+ role = text2role(value);
+ }
}
- /* defaults */
- if (action->fail_role == RSC_ROLE_UNKNOWN) {
- if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- action->fail_role = RSC_ROLE_UNPROMOTED;
+
+ if (role == pcmk_role_unknown) {
+ // Use default
+ if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ role = pcmk_role_unpromoted;
} else {
- action->fail_role = RSC_ROLE_STARTED;
+ role = pcmk_role_started;
}
}
- pe_rsc_trace(action->rsc, "%s failure results in: %s",
- action->uuid, role2text(action->fail_role));
+ pe_rsc_trace(rsc, "Role after %s %s failure is: %s",
+ rsc->id, action_name, role2text(role));
+ return role;
+}
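
The failure role now derives from the already-parsed on-fail response rather than from action state. A minimal sketch, assuming rsc and a meta table obtained from pcmk__unpack_action_meta():

    // Sketch: role a resource falls back to after a failed promote
    enum rsc_role_e fail_role =
        pcmk__role_after_failure(rsc, PCMK_ACTION_PROMOTE,
                                 pcmk_on_fail_restart, meta);
    // With no role_after_failure configured, a failed promote ends Unpromoted
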
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
- if (value) {
- unpack_start_delay(value, action->meta);
- } else {
- long long start_delay = 0;
+/*!
+ * \internal
+ * \brief Unpack action configuration
+ *
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
+ *
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms)
+{
+ const char *value = NULL;
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
- &start_delay)) {
- g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
- crm_strdup_printf("%lld", start_delay));
- }
- }
+ action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
+ action->task, interval_ms, xml_obj);
+ action->needs = pcmk__action_requires(action->rsc, action->task);
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
+ interval_ms, value);
+
+ action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
+ action->on_fail, action->meta);
}
/*!
@@ -929,31 +1105,26 @@ unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] save_action Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
- * \return Action object corresponding to arguments
- * \note This function takes ownership of (and might free) \p key. If
- * \p save_action is true, \p data_set will own the returned action,
- * otherwise it is the caller's responsibility to free the return value
- * with pe_free_action().
+ * \return Action object corresponding to arguments (guaranteed not to be
+ * \c NULL)
+ * \note This function takes ownership of (and might free) \p key, and
+ * \p scheduler takes ownership of the returned action (the caller should
+ * not free it).
*/
-pe_action_t *
-custom_action(pe_resource_t *rsc, char *key, const char *task,
- const pe_node_t *on_node, gboolean optional, gboolean save_action,
- pe_working_set_t *data_set)
+pcmk_action_t *
+custom_action(pcmk_resource_t *rsc, char *key, const char *task,
+ const pcmk_node_t *on_node, gboolean optional,
+ pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
-
- if (save_action) {
- action = find_existing_action(key, rsc, on_node, data_set);
- }
+ CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
+ action = find_existing_action(key, rsc, on_node, scheduler);
if (action == NULL) {
- action = new_action(key, task, rsc, on_node, optional, save_action,
- data_set);
+ action = new_action(key, task, rsc, on_node, optional, scheduler);
} else {
free(key);
}
@@ -961,28 +1132,38 @@ custom_action(pe_resource_t *rsc, char *key, const char *task,
update_action_optional(action, optional);
if (rsc != NULL) {
- if (action->node != NULL) {
- unpack_action_node_attributes(action, data_set);
- }
+ if ((action->node != NULL) && (action->op_entry != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
- update_resource_action_runnable(action, save_action, data_set);
+ GHashTable *attrs = action->node->details->attrs;
- if (save_action) {
- update_resource_flags_for_action(rsc, action);
+ if (action->extra != NULL) {
+ g_hash_table_destroy(action->extra);
+ }
+ action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
+ attrs, scheduler);
+ pe__set_action_flags(action, pcmk_action_attrs_evaluated);
}
+
+ update_resource_action_runnable(action, scheduler);
+ update_resource_flags_for_action(rsc, action);
+ }
+
+ if (action->extra == NULL) {
+ action->extra = pcmk__strkey_table(free, free);
}
return action;
}
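
A sketch of the trimmed calling convention, patterned on the pe__new_rsc_pseudo_action() hunk later in this diff (`rsc` is hypothetical):

    /* The save_action argument is gone: custom_action() now always records
     * the action in the scheduler, which owns the result, and it adopts or
     * frees the key itself.
     */
    pcmk_action_t *start = custom_action(rsc,
                                         pcmk__op_key(rsc->id,
                                                      PCMK_ACTION_START, 0),
                                         PCMK_ACTION_START, NULL, FALSE,
                                         rsc->cluster);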
-pe_action_t *
-get_pseudo_op(const char *name, pe_working_set_t * data_set)
+pcmk_action_t *
+get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
{
- pe_action_t *op = lookup_singleton(data_set, name);
+ pcmk_action_t *op = lookup_singleton(scheduler, name);
if (op == NULL) {
- op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
}
return op;
}
@@ -991,15 +1172,15 @@ static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *candidate = gIter->data;
+ pcmk_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
- } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
+ } else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
- } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
+ } else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
@@ -1013,8 +1194,8 @@ find_unfencing_devices(GList *candidates, GList *matches)
}
static int
-node_priority_fencing_delay(const pe_node_t *node,
- const pe_working_set_t *data_set)
+node_priority_fencing_delay(const pcmk_node_t *node,
+ const pcmk_scheduler_t *scheduler)
{
int member_count = 0;
int online_count = 0;
@@ -1023,13 +1204,13 @@ node_priority_fencing_delay(const pe_node_t *node,
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
- if (data_set->priority_fencing_delay <= 0) {
+ if (scheduler->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
- if (node->details->type != node_member) {
+ if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
@@ -1038,10 +1219,10 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = gIter->data;
+ for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *n = gIter->data;
- if (n->details->type != node_member) {
+ if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
@@ -1077,54 +1258,58 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- return data_set->priority_fencing_delay;
+ return scheduler->priority_fencing_delay;
}
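
In summary, a sketch of the helper's contract as visible in the hunks above (the resource-priority comparison itself is elided from this diff):

    /* Returns 0 (no delay) when priority-fencing-delay is unset or <= 0, or
     * when the fencing target is not a full cluster member (e.g. a remote or
     * guest node); otherwise, when the elided priority checks warrant it,
     * returns scheduler->priority_fencing_delay.
     */
    int delay = node_priority_fencing_delay(node, scheduler);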
-pe_action_t *
-pe_fence_op(pe_node_t *node, const char *op, bool optional,
- const char *reason, bool priority_delay, pe_working_set_t *data_set)
+pcmk_action_t *
+pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay,
+ pcmk_scheduler_t *scheduler)
{
char *op_key = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_action_t *stonith_op = NULL;
if(op == NULL) {
- op = data_set->stonith_action;
+ op = scheduler->stonith_action;
}
- op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
+ op_key = crm_strdup_printf("%s-%s-%s",
+ PCMK_ACTION_STONITH, node->details->uname, op);
- stonith_op = lookup_singleton(data_set, op_key);
+ stonith_op = lookup_singleton(scheduler, op_key);
if(stonith_op == NULL) {
- stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
+ stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
+ TRUE, scheduler);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
- if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
- GList *matches = find_unfencing_devices(data_set->resources, NULL);
+ GList *matches = find_unfencing_devices(scheduler->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *match = gIter->data;
+ pcmk_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
- data = pe__compare_fencing_digest(match, agent, node, data_set);
- if(data->rc == RSC_DIGEST_ALL) {
+ data = pe__compare_fencing_digest(match, agent, node,
+ scheduler);
+ if (data->rc == pcmk__digest_mismatch) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out,
"notice: Unfencing node %s because the "
@@ -1157,7 +1342,7 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
free(op_key);
}
- if (data_set->priority_fencing_delay > 0
+ if (scheduler->priority_fencing_delay > 0
/* It's a suitable case where `priority-fencing-delay` applies.
* At least add `priority-fencing-delay` field as an indicator. */
@@ -1174,15 +1359,16 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
* the targeting node. So that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
- char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
+ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
+ scheduler));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
- if(optional == FALSE && pe_can_fence(data_set, node)) {
- pe__clear_action_flags(stonith_op, pe_action_optional);
+ if(optional == FALSE && pe_can_fence(scheduler, node)) {
+ pe__clear_action_flags(stonith_op, pcmk_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
@@ -1193,13 +1379,13 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
}
void
-pe_free_action(pe_action_t * action)
+pe_free_action(pcmk_action_t *action)
{
if (action == NULL) {
return;
}
- g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
- g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
+ g_list_free_full(action->actions_before, free);
+ g_list_free_full(action->actions_after, free);
if (action->extra) {
g_hash_table_destroy(action->extra);
}
@@ -1215,7 +1401,8 @@ pe_free_action(pe_action_t * action)
}
int
-pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
+pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
@@ -1224,8 +1411,8 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -1240,10 +1427,11 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
}
- if (timeout_spec == NULL && data_set->op_defaults) {
+ if (timeout_spec == NULL && scheduler->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
- &rule_data, action_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->op_defaults, XML_TAG_META_SETS,
+ &rule_data, action_meta, NULL, FALSE,
+ scheduler);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
@@ -1252,7 +1440,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
@@ -1262,16 +1450,16 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
enum action_tasks
-get_complex_task(const pe_resource_t *rsc, const char *name)
+get_complex_task(const pcmk_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
- if ((rsc != NULL) && (rsc->variant == pe_native)) {
+ if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
- case stopped_rsc:
- case started_rsc:
- case action_demoted:
- case action_promoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_demoted:
+ case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
@@ -1294,14 +1482,14 @@ get_complex_task(const pe_resource_t *rsc, const char *name)
*
* \return First action in list that matches criteria, or NULL if none
*/
-pe_action_t *
+pcmk_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
- const pe_node_t *on_node)
+ const pcmk_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
@@ -1324,7 +1512,7 @@ find_first_action(const GList *input, const char *uuid, const char *task,
}
GList *
-find_actions(GList *input, const char *key, const pe_node_t *on_node)
+find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
@@ -1332,7 +1520,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
@@ -1358,7 +1546,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
}
GList *
-find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
+find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *result = NULL;
@@ -1369,7 +1557,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
@@ -1397,7 +1585,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
* without a node will be assigned to node.
*/
GList *
-pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
@@ -1423,16 +1611,18 @@ pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
* \note It is the caller's responsibility to free() the result.
*/
char *
-pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
+pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
- case pe_action_runnable:
- case pe_action_migrate_runnable:
+ case pcmk_action_runnable:
change = "unrunnable";
break;
- case pe_action_optional:
+ case pcmk_action_migratable:
+ change = "unmigrateable";
+ break;
+ case pcmk_action_optional:
change = "required";
break;
default:
@@ -1446,7 +1636,8 @@ pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
action->task);
}
-void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
+void pe_action_set_reason(pcmk_action_t *action, const char *reason,
+ bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
@@ -1468,20 +1659,14 @@ void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrit
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
- * \param[in,out] data_set Cluster working set
- *
- * \return New action to clear resource history
*/
-pe_action_t *
-pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+void
+pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- char *key = NULL;
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- CRM_ASSERT(rsc && node);
- key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
- return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
- data_set);
+ custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->cluster);
}
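
The signature change makes history clearing fire-and-forget; a sketch of the new caller side:

    /* Callers formerly received the new action (and had to manage it when it
     * was not saved); now the LRM delete action is created and owned
     * internally via rsc->cluster.
     */
    pe__clear_resource_history(rsc, node);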
#define sort_return(an_int, why) do { \
@@ -1646,19 +1831,19 @@ sort_op_by_callid(gconstpointer a, gconstpointer b)
*
* \return New action object corresponding to arguments
*/
-pe_action_t *
-pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
+pcmk_action_t *
+pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
- optional, TRUE, rsc->cluster);
- pe__set_action_flags(action, pe_action_pseudo);
+ optional, rsc->cluster);
+ pe__set_action_flags(action, pcmk_action_pseudo);
if (runnable) {
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
return action;
}
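
Hypothetical usage of the trimmed signature:

    // Optional, runnable pseudo-op keyed on the resource and task
    pcmk_action_t *pseudo = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_STOP,
                                                      true,  // optional
                                                      true); // runnable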
@@ -1673,7 +1858,7 @@ pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
* \note This is more efficient than calling add_hash_param().
*/
void
-pe__add_action_expected_result(pe_action_t *action, int expected_result)
+pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
{
char *name = NULL;
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index b8047da..546a2a7 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -93,27 +93,27 @@ attr_in_string(xmlAttrPtr a, void *user_data)
* \param[in] xml_op Unused
* \param[in] op_version CRM feature set to use for digest calculation
* \param[in] overrides Key/value table to override resource parameters
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *params,
+calculate_main_digest(op_digest_cache_t *data, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *params,
const char *task, guint *interval_ms,
const xmlNode *xml_op, const char *op_version,
- GHashTable *overrides, pe_working_set_t *data_set)
+ GHashTable *overrides, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ xmlNode *action_config = NULL;
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
/* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
* that themselves are Pacemaker Remote nodes
*/
- (void) pe__add_bundle_remote_name(rsc, data_set, data->params_all,
+ (void) pe__add_bundle_remote_name(rsc, scheduler, data->params_all,
XML_RSC_ATTR_REMOTE_RA_ADDR);
- // If interval was overridden, reset it
if (overrides != NULL) {
+ // If interval was overridden, reset it
const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_"
XML_LRM_ATTR_INTERVAL);
@@ -125,34 +125,42 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
*interval_ms = (guint) value_ll;
}
}
- }
- action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms),
- task, node, TRUE, FALSE, data_set);
- if (overrides != NULL) {
+ // Add overrides to list of all parameters
g_hash_table_foreach(overrides, hash2field, data->params_all);
}
- g_hash_table_foreach(params, hash2field, data->params_all);
- g_hash_table_foreach(action->extra, hash2field, data->params_all);
- g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
- pcmk__filter_op_for_digest(data->params_all);
+ // Add provided instance parameters
+ g_hash_table_foreach(params, hash2field, data->params_all);
- /* Given a non-recurring operation with extra parameters configured,
- * in case that the main digest doesn't match, even if the restart
- * digest matches, enforce a restart rather than a reload-agent anyway.
- * So that it ensures any changes of the extra parameters get applied
- * for this specific operation, and the digests calculated for the
- * resulting lrm_rsc_op will be correct.
- * Mark the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ // Find action configuration XML in CIB
+ action_config = pcmk__find_action_config(rsc, task, *interval_ms, true);
+
+ /* Add action-specific resource instance attributes to the digest list.
+ *
+ * If this is a one-time action with action-specific instance attributes,
+ * enforce a restart instead of reload-agent in case the main digest doesn't
+ * match, even if the restart digest does. This ensures any changes of the
+ * action-specific parameters get applied for this specific action, and
+ * digests calculated for the resulting history will be correct. Default the
+ * result to pcmk__digest_restart for the case where the main digest doesn't
+ * match.
*/
- if (*interval_ms == 0
- && g_hash_table_size(action->extra) > 0) {
- data->rc = RSC_DIGEST_RESTART;
+ params = pcmk__unpack_action_rsc_params(action_config, node->details->attrs,
+ scheduler);
+ if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) {
+ data->rc = pcmk__digest_restart;
}
+ g_hash_table_foreach(params, hash2field, data->params_all);
+ g_hash_table_destroy(params);
+
+ // Add action meta-attributes
+ params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms,
+ action_config);
+ g_hash_table_foreach(params, hash2metafield, data->params_all);
+ g_hash_table_destroy(params);
- pe_free_action(action);
+ pcmk__filter_op_for_digest(data->params_all);
data->digest_all_calc = calculate_operation_digest(data->params_all,
op_version);
@@ -177,7 +185,7 @@ is_fence_param(xmlAttrPtr attr, void *user_data)
* \param[in] overrides Key/value hash table to override resource parameters
*/
static void
-calculate_secure_digest(op_digest_cache_t *data, const pe_resource_t *rsc,
+calculate_secure_digest(op_digest_cache_t *data, const pcmk_resource_t *rsc,
GHashTable *params, const xmlNode *xml_op,
const char *op_version, GHashTable *overrides)
{
@@ -288,17 +296,17 @@ calculate_restart_digest(op_digest_cache_t *data, const xmlNode *xml_op,
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] overrides Key/value table to override resource parameters
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to new digest cache entry (or NULL on memory error)
* \note It is the caller's responsibility to free the result using
* pe__free_digests().
*/
op_digest_cache_t *
-pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
- const pe_node_t *node, const xmlNode *xml_op,
- GHashTable *overrides, bool calc_secure,
- pe_working_set_t *data_set)
+pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
+ guint *interval_ms, const pcmk_node_t *node,
+ const xmlNode *xml_op, GHashTable *overrides,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t));
const char *op_version = NULL;
@@ -308,23 +316,23 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
return NULL;
}
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
if (xml_op != NULL) {
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
}
- if (op_version == NULL && data_set != NULL && data_set->input != NULL) {
- op_version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ if (op_version == NULL && scheduler != NULL && scheduler->input != NULL) {
+ op_version = crm_element_value(scheduler->input, XML_ATTR_CRM_VERSION);
}
if (op_version == NULL) {
op_version = CRM_FEATURE_SET;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
- op_version, overrides, data_set);
+ op_version, overrides, scheduler);
if (calc_secure) {
calculate_secure_digest(data, rsc, params, xml_op, op_version,
overrides);
@@ -343,14 +351,14 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
* \param[in,out] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry
*/
static op_digest_cache_t *
-rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
- pe_node_t *node, const xmlNode *xml_op,
- bool calc_secure, pe_working_set_t *data_set)
+rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms,
+ pcmk_node_t *node, const xmlNode *xml_op,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
char *key = pcmk__op_key(rsc->id, task, interval_ms);
@@ -358,7 +366,7 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
- NULL, calc_secure, data_set);
+ NULL, calc_secure, scheduler);
CRM_ASSERT(data != NULL);
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
@@ -370,16 +378,16 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
* \internal
* \brief Calculate operation digests and compare against an XML history entry
*
- * \param[in,out] rsc Resource to check
- * \param[in] xml_op Resource history XML
- * \param[in,out] node Node to use for digest calculation
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to check
+ * \param[in] xml_op Resource history XML
+ * \param[in,out] node Node to use for digest calculation
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry, with comparison result set
*/
op_digest_cache_t *
-rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
- pe_node_t *node, pe_working_set_t *data_set)
+rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
guint interval_ms = 0;
@@ -397,8 +405,9 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
- pcmk_is_set(data_set->flags, pe_flag_sanitized),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_sanitized),
+ scheduler);
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
@@ -408,11 +417,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
data->digest_restart_calc,
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_RESTART;
+ data->rc = pcmk__digest_restart;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
/* Given a non-recurring operation with extra parameters configured,
@@ -421,11 +430,10 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
* So that it ensures any changes of the extra parameters get applied
* for this specific operation, and the digests calculated for the
* resulting lrm_rsc_op will be correct.
- * Preserve the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ * Preserve the implied rc pcmk__digest_restart for the case that the
+ * main digest doesn't match.
*/
- if (interval_ms == 0
- && data->rc == RSC_DIGEST_RESTART) {
+ if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) {
pe_rsc_info(rsc, "Parameters containing extra ones to %ums-interval"
" %s action for %s on %s "
"changed: hash was %s vs. now %s (restart:%s) %s",
@@ -442,11 +450,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
(interval_ms > 0)? "reschedule" : "reload",
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_ALL;
+ data->rc = pcmk__digest_mismatch;
}
} else {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
}
return data;
}
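
A sketch of consuming the comparison result with the renamed enum values (all four appear in this hunk):

    op_digest_cache_t *digests = rsc_action_digest_cmp(rsc, xml_op, node,
                                                       scheduler);

    switch (digests->rc) {
        case pcmk__digest_match:    // parameters unchanged
            break;
        case pcmk__digest_restart:  // a restart-required parameter changed
            break;
        case pcmk__digest_mismatch: // changed; reload/reschedule may suffice
            break;
        case pcmk__digest_unknown:  // no previous digest to compare against
            break;
    }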
@@ -522,34 +530,34 @@ unfencing_digest_matches(const char *rsc_id, const char *agent,
* \internal
* \brief Calculate fence device digests and digest comparison result
*
- * \param[in,out] rsc Fence device resource
- * \param[in] agent Fence device's agent type
- * \param[in,out] node Node with digest cache to use
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Fence device resource
+ * \param[in] agent Fence device's agent type
+ * \param[in,out] node Node with digest cache to use
+ * \param[in,out] scheduler Scheduler data
*
* \return Node's digest cache entry
*/
op_digest_cache_t *
-pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
- pe_node_t *node, pe_working_set_t *data_set)
+pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
- node, NULL, TRUE, data_set);
+ node, NULL, TRUE, scheduler);
// Check whether node has special unfencing summary node attribute
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
if (node_summary == NULL) {
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
return data;
}
@@ -557,9 +565,9 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_match;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out, "Only 'private' parameters to %s "
"for unfencing %s changed", rsc->id,
pe__node_name(node));
@@ -568,10 +576,12 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
}
// Parameters don't match
- data->rc = RSC_DIGEST_ALL;
- if (pcmk_is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) {
- if (data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_mismatch;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)
+ && (data->digest_secure_calc != NULL)) {
+
+ if (scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
diff --git a/lib/pengine/pe_health.c b/lib/pengine/pe_health.c
index 6419fdf..93028ae 100644
--- a/lib/pengine/pe_health.c
+++ b/lib/pengine/pe_health.c
@@ -17,12 +17,12 @@
* \internal
* \brief Set the node health values to use for "red", "yellow", and "green"
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__unpack_node_health_scores(pe_working_set_t *data_set)
+pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler)
{
- switch (pe__health_strategy(data_set)) {
+ switch (pe__health_strategy(scheduler)) {
case pcmk__health_strategy_none:
pcmk__score_red = 0;
pcmk__score_yellow = 0;
@@ -43,11 +43,11 @@ pe__unpack_node_health_scores(pe_working_set_t *data_set)
default: // progressive or custom
pcmk__score_red = pe__health_score(PCMK__OPT_NODE_HEALTH_RED,
- data_set);
+ scheduler);
pcmk__score_green = pe__health_score(PCMK__OPT_NODE_HEALTH_GREEN,
- data_set);
+ scheduler);
pcmk__score_yellow = pe__health_score(PCMK__OPT_NODE_HEALTH_YELLOW,
- data_set);
+ scheduler);
break;
}
@@ -93,7 +93,7 @@ add_node_health_value(gpointer key, gpointer value, gpointer user_data)
* \return Sum of all health attribute scores of \p node plus \p base_health
*/
int
-pe__sum_node_health_scores(const pe_node_t *node, int base_health)
+pe__sum_node_health_scores(const pcmk_node_t *node, int base_health)
{
CRM_ASSERT(node != NULL);
g_hash_table_foreach(node->details->attrs, add_node_health_value,
@@ -111,7 +111,7 @@ pe__sum_node_health_scores(const pe_node_t *node, int base_health)
* otherwise 0 if any attribute is yellow, otherwise a positive value.
*/
int
-pe__node_health(pe_node_t *node)
+pe__node_health(pcmk_node_t *node)
{
GHashTableIter iter;
const char *name = NULL;
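
Taken together, a sketch of how the health helpers are meant to be used, with the return-value ranges taken from the doc comments above:

    pe__unpack_node_health_scores(scheduler); // set red/yellow/green scores

    int health = pe__node_health(node);
    if (health < 0) {
        // at least one "red" node health attribute
    } else if (health == 0) {
        // at least one "yellow" attribute (and none red)
    } else {
        // all health attributes green
    }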
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
index 7ed490f..0e1e239 100644
--- a/lib/pengine/pe_notif.c
+++ b/lib/pengine/pe_notif.c
@@ -9,13 +9,15 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
+
+#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
#include "pe_status_private.h"
typedef struct notify_entry_s {
- const pe_resource_t *rsc;
- const pe_node_t *node;
+ const pcmk_resource_t *rsc;
+ const pcmk_node_t *node;
} notify_entry_t;
/*!
@@ -105,7 +107,7 @@ dup_notify_entry(const notify_entry_t *entry)
* \internal
* \brief Given a list of nodes, create strings with node names
*
- * \param[in] list List of nodes (as pe_node_t *)
+ * \param[in] list List of nodes (as pcmk_node_t *)
* \param[out] all_node_names If not NULL, will be set to space-separated list
* of the names of all nodes in \p list
* \param[out] host_node_names Same as \p all_node_names, except active
@@ -126,7 +128,7 @@ get_node_names(const GList *list, GString **all_node_names,
}
for (const GList *iter = list; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
if (node->details->uname == NULL) {
continue;
@@ -242,7 +244,7 @@ notify_entries_to_strings(GList *list, GString **rsc_names,
static void
copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
{
- pe_action_t *notify = (pe_action_t *) user_data;
+ pcmk_action_t *notify = (pcmk_action_t *) user_data;
/* Any existing meta-attributes (for example, the action timeout) are for
* the notify action itself, so don't override those.
@@ -256,7 +258,8 @@ copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
}
static void
-add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
+add_notify_data_to_action_meta(const notify_data_t *n_data,
+ pcmk_action_t *action)
{
for (const GSList *item = n_data->keys; item; item = item->next) {
const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
@@ -271,23 +274,23 @@ add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
*
* \param[in,out] rsc Clone resource that notification is for
* \param[in] action Action to use in notify action key
- * \param[in] notif_action RSC_NOTIFY or RSC_NOTIFIED
+ * \param[in] notif_action PCMK_ACTION_NOTIFY or PCMK_ACTION_NOTIFIED
* \param[in] notif_type "pre", "post", "confirmed-pre", "confirmed-post"
*
* \return Newly created notify pseudo-action
*/
-static pe_action_t *
-new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
+static pcmk_action_t *
+new_notify_pseudo_action(pcmk_resource_t *rsc, const pcmk_action_t *action,
const char *notif_action, const char *notif_type)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
notify = custom_action(rsc,
pcmk__notify_key(rsc->id, notif_type, action->task),
notif_action, NULL,
- pcmk_is_set(action->flags, pe_action_optional),
- TRUE, rsc->cluster);
- pe__set_action_flags(notify, pe_action_pseudo);
+ pcmk_is_set(action->flags, pcmk_action_optional),
+ rsc->cluster);
+ pe__set_action_flags(notify, pcmk_action_pseudo);
add_hash_param(notify->meta, "notify_key_type", notif_type);
add_hash_param(notify->meta, "notify_key_operation", action->task);
return notify;
@@ -305,12 +308,13 @@ new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
*
* \return Newly created notify action
*/
-static pe_action_t *
-new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
- pe_action_t *notify_done, const notify_data_t *n_data)
+static pcmk_action_t *
+new_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *op, pcmk_action_t *notify_done,
+ const notify_data_t *n_data)
{
char *key = NULL;
- pe_action_t *notify_action = NULL;
+ pcmk_action_t *notify_action = NULL;
const char *value = NULL;
const char *task = NULL;
const char *skip_reason = NULL;
@@ -324,7 +328,7 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
skip_reason = "no parent notification";
} else if (!node->details->online) {
skip_reason = "node offline";
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
skip_reason = "original action not runnable";
}
if (skip_reason != NULL) {
@@ -342,16 +346,16 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
// Create the notify action
key = pcmk__notify_key(rsc->id, value, task);
notify_action = custom_action(rsc, key, op->task, node,
- pcmk_is_set(op->flags, pe_action_optional),
- TRUE, rsc->cluster);
+ pcmk_is_set(op->flags, pcmk_action_optional),
+ rsc->cluster);
// Add meta-data to notify action
g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
add_notify_data_to_action_meta(n_data, notify_action);
// Order notify after original action and before parent notification
- order_actions(op, notify_action, pe_order_optional);
- order_actions(notify_action, notify_done, pe_order_optional);
+ order_actions(op, notify_action, pcmk__ar_ordered);
+ order_actions(notify_action, notify_done, pcmk__ar_ordered);
return notify_action;
}
@@ -364,10 +368,10 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
* \param[in,out] n_data Notification values to add to action meta-data
*/
static void
-new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
+new_post_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
notify_data_t *n_data)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
CRM_ASSERT(n_data != NULL);
@@ -383,16 +387,16 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
return;
}
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *mon = (pe_action_t *) iter->data;
+ pcmk_action_t *mon = (pcmk_action_t *) iter->data;
const char *interval_ms_s = NULL;
interval_ms_s = g_hash_table_lookup(mon->meta,
XML_LRM_ATTR_INTERVAL_MS);
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
- || pcmk__str_eq(mon->task, RSC_CANCEL, pcmk__str_none)) {
+ || pcmk__str_eq(mon->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
continue; // Not a recurring monitor
}
- order_actions(n_data->post_done, mon, pe_order_optional);
+ order_actions(n_data->post_done, mon, pcmk__ar_ordered);
}
}
@@ -428,12 +432,12 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
* \return Newly created notification data
*/
notify_data_t *
-pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action, pe_action_t *complete)
+pe__action_notif_pseudo_ops(pcmk_resource_t *rsc, const char *task,
+ pcmk_action_t *action, pcmk_action_t *complete)
{
notify_data_t *n_data = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
return NULL;
}
@@ -445,60 +449,63 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
if (action != NULL) { // Need "pre-" pseudo-actions
// Create "pre-" notify pseudo-action for clone
- n_data->pre = new_notify_pseudo_action(rsc, action, RSC_NOTIFY, "pre");
- pe__set_action_flags(n_data->pre, pe_action_runnable);
+ n_data->pre = new_notify_pseudo_action(rsc, action, PCMK_ACTION_NOTIFY,
+ "pre");
+ pe__set_action_flags(n_data->pre, pcmk_action_runnable);
add_hash_param(n_data->pre->meta, "notify_type", "pre");
add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
// Create "pre-" notifications complete pseudo-action for clone
- n_data->pre_done = new_notify_pseudo_action(rsc, action, RSC_NOTIFIED,
+ n_data->pre_done = new_notify_pseudo_action(rsc, action,
+ PCMK_ACTION_NOTIFIED,
"confirmed-pre");
- pe__set_action_flags(n_data->pre_done, pe_action_runnable);
+ pe__set_action_flags(n_data->pre_done, pcmk_action_runnable);
add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
add_hash_param(n_data->pre_done->meta,
"notify_operation", n_data->action);
// Order "pre-" -> "pre-" complete -> original action
- order_actions(n_data->pre, n_data->pre_done, pe_order_optional);
- order_actions(n_data->pre_done, action, pe_order_optional);
+ order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
+ order_actions(n_data->pre_done, action, pcmk__ar_ordered);
}
if (complete != NULL) { // Need "post-" pseudo-actions
// Create "post-" notify pseudo-action for clone
- n_data->post = new_notify_pseudo_action(rsc, complete, RSC_NOTIFY,
- "post");
+ n_data->post = new_notify_pseudo_action(rsc, complete,
+ PCMK_ACTION_NOTIFY, "post");
n_data->post->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post, pe_action_runnable);
+ pe__clear_action_flags(n_data->post, pcmk_action_runnable);
}
add_hash_param(n_data->post->meta, "notify_type", "post");
add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
// Create "post-" notifications complete pseudo-action for clone
n_data->post_done = new_notify_pseudo_action(rsc, complete,
- RSC_NOTIFIED,
+ PCMK_ACTION_NOTIFIED,
"confirmed-post");
n_data->post_done->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post_done, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post_done, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post_done, pe_action_runnable);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_runnable);
}
add_hash_param(n_data->post_done->meta, "notify_type", "post");
add_hash_param(n_data->post_done->meta,
"notify_operation", n_data->action);
// Order original action complete -> "post-" -> "post-" complete
- order_actions(complete, n_data->post, pe_order_implies_then);
- order_actions(n_data->post, n_data->post_done, pe_order_implies_then);
+ order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
+ order_actions(n_data->post, n_data->post_done,
+ pcmk__ar_first_implies_then);
}
// If we created both, order "pre-" complete -> "post-"
if ((action != NULL) && (complete != NULL)) {
- order_actions(n_data->pre_done, n_data->post, pe_order_optional);
+ order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
}
return n_data;
}
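
A sketch of the full notification lifecycle, mirroring pe__order_notifs_after_fencing() later in this file (`start` and `started` are hypothetical actions):

    notify_data_t *n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_START,
                                                        start, started);

    if (n_data != NULL) { // NULL when the clone does not have notify enabled
        collect_resource_data(rsc, true, n_data);
        pe__create_action_notifications(rsc, n_data);
        pe__free_action_notification_data(n_data);
    }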
@@ -514,7 +521,7 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
* \note The caller is responsible for freeing the return value.
*/
static notify_entry_t *
-new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
+new_notify_entry(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
notify_entry_t *entry = calloc(1, sizeof(notify_entry_t));
@@ -533,12 +540,12 @@ new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in,out] n_data Notification data for clone
*/
static void
-collect_resource_data(const pe_resource_t *rsc, bool activity,
+collect_resource_data(const pcmk_resource_t *rsc, bool activity,
notify_data_t *n_data)
{
const GList *iter = NULL;
notify_entry_t *entry = NULL;
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
if (n_data == NULL) {
return;
@@ -551,7 +558,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// If this is a clone, call recursively for each instance
if (rsc->children != NULL) {
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
collect_resource_data(child, activity, n_data);
}
@@ -567,21 +574,21 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification indicating the resource state
switch (rsc->role) {
- case RSC_ROLE_STOPPED:
+ case pcmk_role_stopped:
n_data->inactive = g_list_prepend(n_data->inactive, entry);
break;
- case RSC_ROLE_STARTED:
+ case pcmk_role_started:
n_data->active = g_list_prepend(n_data->active, entry);
break;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_unpromoted:
n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
n_data->promoted = g_list_prepend(n_data->promoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
@@ -601,30 +608,31 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification entries for each of the resource's actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- const pe_action_t *op = (const pe_action_t *) iter->data;
+ const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
enum action_tasks task = text2task(op->task);
- if ((task == stop_rsc) && op->node->details->unclean) {
+ if ((task == pcmk_action_stop) && op->node->details->unclean) {
// Create anyway (additional noise if node can't be fenced)
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
continue;
}
entry = new_notify_entry(rsc, op->node);
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
n_data->start = g_list_prepend(n_data->start, entry);
break;
- case stop_rsc:
+ case pcmk_action_stop:
n_data->stop = g_list_prepend(n_data->stop, entry);
break;
- case action_promote:
+ case pcmk_action_promote:
n_data->promote = g_list_prepend(n_data->promote, entry);
break;
- case action_demote:
+ case pcmk_action_demote:
n_data->demote = g_list_prepend(n_data->demote, entry);
break;
default:
@@ -661,7 +669,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
* \param[in,out] n_data Notification data
*/
static void
-add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
+add_notif_keys(const pcmk_resource_t *rsc, notify_data_t *n_data)
{
bool required = false; // Whether to make notify actions required
GString *rsc_list = NULL;
@@ -673,14 +681,14 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
n_data->stop = notify_entries_to_strings(n_data->stop,
&rsc_list, &node_list);
if ((strcmp(" ", (const char *) rsc_list->str) != 0)
- && pcmk__str_eq(n_data->action, RSC_STOP, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_STOP, pcmk__str_none)) {
required = true;
}
add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
if ((n_data->start != NULL)
- && pcmk__str_eq(n_data->action, RSC_START, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_START, pcmk__str_none)) {
required = true;
}
n_data->start = notify_entries_to_strings(n_data->start,
@@ -689,7 +697,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
if ((n_data->demote != NULL)
- && pcmk__str_eq(n_data->action, RSC_DEMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
required = true;
}
n_data->demote = notify_entries_to_strings(n_data->demote,
@@ -698,7 +706,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
if ((n_data->promote != NULL)
- && pcmk__str_eq(n_data->action, RSC_PROMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
required = true;
}
n_data->promote = notify_entries_to_strings(n_data->promote,
@@ -755,13 +763,13 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
if (required && (n_data->pre != NULL)) {
- pe__clear_action_flags(n_data->pre, pe_action_optional);
- pe__clear_action_flags(n_data->pre_done, pe_action_optional);
+ pe__clear_action_flags(n_data->pre, pcmk_action_optional);
+ pe__clear_action_flags(n_data->pre_done, pcmk_action_optional);
}
if (required && (n_data->post != NULL)) {
- pe__clear_action_flags(n_data->post, pe_action_optional);
- pe__clear_action_flags(n_data->post_done, pe_action_optional);
+ pe__clear_action_flags(n_data->post, pcmk_action_optional);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_optional);
}
}
@@ -773,14 +781,15 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
*
* \return If action is behind a remote connection, connection's start
*/
-static pe_action_t *
-find_remote_start(pe_action_t *action)
+static pcmk_action_t *
+find_remote_start(pcmk_action_t *action)
{
if ((action != NULL) && (action->node != NULL)) {
- pe_resource_t *remote_rsc = action->node->details->remote_rsc;
+ pcmk_resource_t *remote_rsc = action->node->details->remote_rsc;
if (remote_rsc != NULL) {
- return find_first_action(remote_rsc->actions, NULL, RSC_START,
+ return find_first_action(remote_rsc->actions, NULL,
+ PCMK_ACTION_START,
NULL);
}
}
@@ -795,11 +804,11 @@ find_remote_start(pe_action_t *action)
* \param[in,out] n_data Clone notification data for some action
*/
static void
-create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
+create_notify_actions(pcmk_resource_t *rsc, notify_data_t *n_data)
{
GList *iter = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *start = NULL;
enum action_tasks task = text2task(n_data->action);
// If this is a clone, call recursively for each instance
@@ -810,14 +819,15 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Add notification meta-attributes to original actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *op = (pe_action_t *) iter->data;
+ pcmk_action_t *op = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
switch (text2task(op->task)) {
- case start_rsc:
- case stop_rsc:
- case action_promote:
- case action_demote:
+ case pcmk_action_start:
+ case pcmk_action_stop:
+ case pcmk_action_promote:
+ case pcmk_action_demote:
add_notify_data_to_action_meta(n_data, op);
break;
default:
@@ -828,7 +838,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Skip notify action itself if original action was not needed
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
if (n_data->start == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -836,7 +846,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_promote:
+ case pcmk_action_promote:
if (n_data->promote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -844,7 +854,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_demote:
+ case pcmk_action_demote:
if (n_data->demote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -861,18 +871,19 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
rsc->id, n_data->action);
// Create notify actions for stop or demote
- if ((rsc->role != RSC_ROLE_STOPPED)
- && ((task == stop_rsc) || (task == action_demote))) {
+ if ((rsc->role != pcmk_role_stopped)
+ && ((task == pcmk_action_stop) || (task == pcmk_action_demote))) {
- stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL);
+ stop = find_first_action(rsc->actions, NULL, PCMK_ACTION_STOP, NULL);
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current_node = (pe_node_t *) iter->data;
+ pcmk_node_t *current_node = (pcmk_node_t *) iter->data;
/* If a stop is a pseudo-action implied by fencing, don't try to
* notify the node getting fenced.
*/
- if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)
+ if ((stop != NULL)
+ && pcmk_is_set(stop->flags, pcmk_action_pseudo)
&& (current_node->details->unclean
|| current_node->details->remote_requires_reset)) {
continue;
@@ -881,23 +892,23 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
new_notify_action(rsc, current_node, n_data->pre,
n_data->pre_done, n_data);
- if ((task == action_demote) || (stop == NULL)
- || pcmk_is_set(stop->flags, pe_action_optional)) {
+ if ((task == pcmk_action_demote) || (stop == NULL)
+ || pcmk_is_set(stop->flags, pcmk_action_optional)) {
new_post_notify_action(rsc, current_node, n_data);
}
}
}
// Create notify actions for start or promote
- if ((rsc->next_role != RSC_ROLE_STOPPED)
- && ((task == start_rsc) || (task == action_promote))) {
+ if ((rsc->next_role != pcmk_role_stopped)
+ && ((task == pcmk_action_start) || (task == pcmk_action_promote))) {
- start = find_first_action(rsc->actions, NULL, RSC_START, NULL);
+ start = find_first_action(rsc->actions, NULL, PCMK_ACTION_START, NULL);
if (start != NULL) {
- pe_action_t *remote_start = find_remote_start(start);
+ pcmk_action_t *remote_start = find_remote_start(start);
if ((remote_start != NULL)
- && !pcmk_is_set(remote_start->flags, pe_action_runnable)) {
+ && !pcmk_is_set(remote_start->flags, pcmk_action_runnable)) {
/* Start and promote actions for a clone instance behind
* a Pacemaker Remote connection happen after the
* connection starts. If the connection start is blocked, do
@@ -911,8 +922,8 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
role2text(rsc->next_role), rsc->id);
return;
}
- if ((task != start_rsc) || (start == NULL)
- || pcmk_is_set(start->flags, pe_action_optional)) {
+ if ((task != pcmk_action_start) || (start == NULL)
+ || pcmk_is_set(start->flags, pcmk_action_optional)) {
new_notify_action(rsc, rsc->allocated_to, n_data->pre,
n_data->pre_done, n_data);
@@ -929,7 +940,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
* \param[in,out] n_data Clone notification data for some action
*/
void
-pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data)
+pe__create_action_notifications(pcmk_resource_t *rsc, notify_data_t *n_data)
{
if ((rsc == NULL) || (n_data == NULL)) {
return;
@@ -978,13 +989,14 @@ pe__free_action_notification_data(notify_data_t *n_data)
* \param[in,out] stonith_op Fencing action that implies \p stop
*/
void
-pe__order_notifs_after_fencing(const pe_action_t *stop, pe_resource_t *rsc,
- pe_action_t *stonith_op)
+pe__order_notifs_after_fencing(const pcmk_action_t *stop, pcmk_resource_t *rsc,
+ pcmk_action_t *stonith_op)
{
notify_data_t *n_data;
crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
- n_data = pe__action_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op);
+ n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_STOP, NULL,
+ stonith_op);
if (n_data != NULL) {
collect_resource_data(rsc, false, n_data);
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 68cc867..65f3c18 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -8,28 +8,31 @@
*/
#include <crm_internal.h>
+
#include <stdint.h>
+
#include <crm/common/xml_internal.h>
#include <crm/common/output.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/cib/util.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
const char *
-pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
+pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts)
{
const char * desc = NULL;
// User-supplied description
- if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
- || pcmk__list_of_multiple(rsc->running_on)) {
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) {
desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
}
return desc;
}
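
A hypothetical call showing the narrowed condition: after this change the description is returned only for these display options, and no longer merely because the resource is active on multiple nodes:

    const char *desc = pe__resource_description(rsc,
                                                pcmk_show_rsc_only
                                                |pcmk_show_description);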
/* Never display node attributes whose name starts with one of these prefixes */
-#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
- "shutdown", "terminate", "standby", "#", NULL }
+#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
+ "shutdown", PCMK_NODE_ATTR_TERMINATE, "standby", "#", \
+ NULL }
static int
compare_attribute(gconstpointer a, gconstpointer b)
@@ -47,7 +50,7 @@ compare_attribute(gconstpointer a, gconstpointer b)
*
* \param[in] node Node that ran this resource
* \param[in,out] rsc_list List of resources for this node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in] attrname Attribute to find
* \param[out] expected_score Expected value for this attribute
*
@@ -57,19 +60,20 @@ compare_attribute(gconstpointer a, gconstpointer b)
* or degraded.
*/
static bool
-add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
- const char *attrname, int *expected_score)
+add_extra_info(const pcmk_node_t *node, GList *rsc_list,
+ pcmk_scheduler_t *scheduler, const char *attrname,
+ int *expected_score)
{
GList *gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
GHashTable *params = NULL;
if (rsc->children != NULL) {
- if (add_extra_info(node, rsc->children, data_set, attrname,
+ if (add_extra_info(node, rsc->children, scheduler, attrname,
expected_score)) {
return true;
}
@@ -79,7 +83,7 @@ add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_se
continue;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
name = g_hash_table_lookup(params, "name");
if (name == NULL) {
@@ -150,13 +154,15 @@ get_operation_list(xmlNode *rsc_entry) {
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Ignore notifies and some probes */
- if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)
+ || (pcmk__str_eq(task, "probe", pcmk__str_none)
+ && (op_rc_i == CRM_EX_NOT_RUNNING))) {
continue;
}
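The two conditions above encode the display policy for operation history: a monitor with a 0ms (or missing) interval is a one-shot probe, while notify operations and probes that found the resource not running (CRM_EX_NOT_RUNNING, exit code 7) are skipped. A standalone restatement of the rule, with plain case-sensitive C stand-ins for pcmk__str_eq():

#include <stdbool.h>
#include <string.h>

// Sketch only: decide how a recorded operation is displayed.
// A NULL return here means "skip this entry".
static const char *
display_task(const char *task, const char *interval_ms_s, int op_rc)
{
    bool is_probe = (strcmp(task, "monitor") == 0)
                    && ((interval_ms_s == NULL)
                        || (strcmp(interval_ms_s, "0") == 0));

    if (is_probe) {
        return (op_rc == 7)? NULL : "probe";  // 7 == CRM_EX_NOT_RUNNING
    }
    if (strcmp(task, "notify") == 0) {
        return NULL;
    }
    return task;
}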
@@ -188,10 +194,10 @@ append_dump_text(gpointer key, gpointer value, gpointer user_data)
}
static const char *
-get_cluster_stack(pe_working_set_t *data_set)
+get_cluster_stack(pcmk_scheduler_t *scheduler)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
@@ -290,7 +296,7 @@ op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
}
static char *
-resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
+resource_history_string(pcmk_resource_t *rsc, const char *rsc_id, bool all,
int failcount, time_t last_failure) {
char *buf = NULL;
@@ -325,27 +331,39 @@ resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
return buf;
}
+/*!
+ * \internal
+ * \brief Get a node's feature set for status display purposes
+ *
+ * \param[in] node Node to check
+ *
+ * \return String representation of feature set if the node is fully up (using
+ * "<3.15.1" for older nodes that don't set the #feature-set attribute),
+ * otherwise NULL
+ */
static const char *
-get_node_feature_set(pe_node_t *node) {
-    const char *feature_set = NULL;
-    if (node->details->online && !pe__is_guest_or_remote_node(node)) {
-        feature_set = g_hash_table_lookup(node->details->attrs,
-                                          CRM_ATTR_FEATURE_SET);
-        /* The feature set attribute is present since 3.15.1. If it is missing
-         * then the node must be running an earlier version. */
-        if (feature_set == NULL) {
-            feature_set = "<3.15.1";
-        }
+get_node_feature_set(const pcmk_node_t *node)
+{
+    if (node->details->online && node->details->expected_up
+        && !pe__is_guest_or_remote_node(node)) {
+        const char *feature_set = g_hash_table_lookup(node->details->attrs,
+                                                      CRM_ATTR_FEATURE_SET);
+
+        /* The feature set attribute is present since 3.15.1. If it is missing,
+         * then the node must be running an earlier version.
+         */
+        return pcmk__s(feature_set, "<3.15.1");
     }
-    return feature_set;
+    return NULL;
}
static bool
-is_mixed_version(pe_working_set_t *data_set) {
+is_mixed_version(pcmk_scheduler_t *scheduler)
+{
const char *feature_set = NULL;
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
const char *node_feature_set = get_node_feature_set(node);
if (node_feature_set != NULL) {
if (feature_set == NULL) {
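Two notes on the code above. First, pcmk__s(s, d) evaluates to s when s is non-NULL and to d otherwise, which is what yields the "<3.15.1" fallback for fully-up nodes that predate the #feature-set attribute. Second, the hunk is cut off mid-comparison; the elided logic amounts to remembering the first feature set seen and flagging any difference. A self-contained sketch of that comparison (hypothetical helper, not the patch text):

#include <stdbool.h>
#include <string.h>

// Sketch: the cluster is "mixed version" once two online nodes report
// different feature-set strings.
static bool
feature_sets_differ(const char *first_seen, const char *this_node)
{
    return (first_seen != NULL) && (this_node != NULL)
           && (strcmp(first_seen, this_node) != 0);
}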
@@ -359,7 +377,7 @@ is_mixed_version(pe_working_set_t *data_set) {
}
static char *
-formatted_xml_buf(pe_resource_t *rsc, bool raw)
+formatted_xml_buf(const pcmk_resource_t *rsc, bool raw)
{
if (raw) {
return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
@@ -368,18 +386,18 @@ formatted_xml_buf(pe_resource_t *rsc, bool raw)
}
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -388,47 +406,52 @@ cluster_summary(pcmk__output_t *out, va_list args) {
if (pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -436,18 +459,18 @@ cluster_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -455,38 +478,44 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
/* Always print DC if none, even if not requested */
- if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
+ if ((scheduler->dc_node == NULL)
+ || pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
@@ -497,13 +526,13 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -512,7 +541,7 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
char *
-pe__node_display_name(pe_node_t *node, bool print_detail)
+pe__node_display_name(pcmk_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
@@ -523,8 +552,8 @@ pe__node_display_name(pe_node_t *node, bool print_detail)
/* Host is displayed only if this is a guest node and detail is requested */
if (print_detail && pe__is_guest_node(node)) {
- const pe_resource_t *container = node->details->remote_rsc->container;
- const pe_node_t *host_node = pe__current_node(container);
+ const pcmk_resource_t *container = node->details->remote_rsc->container;
+ const pcmk_node_t *host_node = pe__current_node(container);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
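When a guest node's host is known and detail was requested, the display name combines both names; the exact "guest@host" format below is an assumption for illustration, inferred from typical crm_mon output rather than quoted from the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Sketch only: combine a guest node's name with its host's name.
static char *
display_name(const char *uname, const char *node_host)
{
    if (node_host != NULL) {
        size_t len = strlen(uname) + strlen(node_host) + 2;  // '@' + '\0'
        char *buf = malloc(len);

        if (buf != NULL) {
            snprintf(buf, len, "%s@%s", uname, node_host);
        }
        return buf;
    }
    return strdup(uname);
}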
@@ -575,9 +604,7 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
- xml_node = is_list
- ? create_xml_node(xml_node, tag_name)
- : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
+ xml_node = create_xml_node(xml_node, tag_name);
va_start(args, pairs_count);
while(pairs_count--) {
@@ -598,20 +625,20 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
static const char *
role_desc(enum rsc_role_e role)
{
- if (role == RSC_ROLE_PROMOTED) {
+ if (role == pcmk_role_promoted) {
#ifdef PCMK__COMPAT_2_0
- return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
+ return "as " PCMK__ROLE_PROMOTED_LEGACY " ";
#else
- return "in " RSC_ROLE_PROMOTED_S " role ";
+ return "in " PCMK__ROLE_PROMOTED " role ";
#endif
}
return "";
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_html(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -628,10 +655,10 @@ ban_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_text(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -645,14 +672,14 @@ ban_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
- const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
+ const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted);
char *weight_s = pcmk__itoa(pe_node->weight);
pcmk__output_create_xml_node(out, "ban",
@@ -674,11 +701,11 @@ ban_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
+PCMK__OUTPUT_ARGS("ban-list", "pcmk_scheduler_t *", "const char *", "GList *",
"uint32_t", "bool")
static int
ban_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *prefix = va_arg(args, const char *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -688,9 +715,10 @@ ban_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Print each ban */
- for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
+ for (gIter = scheduler->placement_constraints;
+ gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
- const pe_resource_t *rsc = location->rsc_lh;
+ const pcmk_resource_t *rsc = location->rsc_lh;
if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
continue;
@@ -704,7 +732,7 @@ ban_list(pcmk__output_t *out, va_list args) {
}
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
@@ -843,11 +871,11 @@ cluster_counts_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_html(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -881,11 +909,11 @@ cluster_dc_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_text(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -904,11 +932,11 @@ cluster_dc_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
@@ -937,11 +965,11 @@ static int
cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
unsigned long long flags = va_arg(args, unsigned long long);
- if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
return pcmk_rc_ok;
- } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n");
return pcmk_rc_ok;
@@ -950,48 +978,54 @@ cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
@@ -1005,50 +1039,56 @@ cluster_options_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_log(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
} else {
return pcmk_rc_no_output;
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_text(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
@@ -1056,43 +1096,48 @@ cluster_options_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+#define bv(flag) pcmk__btoa(pcmk_is_set(scheduler->flags, (flag)))
+
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *no_quorum_policy = NULL;
- char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
- char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
+ char *stonith_timeout_str = pcmk__itoa(scheduler->stonith_timeout);
+ char *priority_fencing_delay_str = pcmk__itoa(scheduler->priority_fencing_delay * 1000);
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
no_quorum_policy = "freeze";
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
no_quorum_policy = "stop";
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
no_quorum_policy = "demote";
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
no_quorum_policy = "ignore";
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
no_quorum_policy = "suicide";
break;
}
pcmk__output_create_xml_node(out, "cluster_options",
- "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
- "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
+ "stonith-enabled",
+ bv(pcmk_sched_fencing_enabled),
+ "symmetric-cluster",
+ bv(pcmk_sched_symmetric_cluster),
"no-quorum-policy", no_quorum_policy,
- "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
- "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
+ "maintenance-mode",
+ bv(pcmk_sched_in_maintenance),
+ "stop-all-resources", bv(pcmk_sched_stop_all),
"stonith-timeout-ms", stonith_timeout_str,
"priority-fencing-delay-ms", priority_fencing_delay_str,
NULL);
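The bv() macro above is pure shorthand: it expands to pcmk__btoa(pcmk_is_set(scheduler->flags, flag)) and so produces "true" or "false" for each XML attribute value. A self-contained demo of the same idiom with stand-in macros (the flag bit value is illustrative):

#include <stdint.h>
#include <stdio.h>

// Stand-ins for the Pacemaker helpers: pcmk_is_set() tests whether a
// bit is set in a flag word; pcmk__btoa() maps a boolean to the
// strings "true"/"false". BV() just composes the two, like bv() above.
#define IS_SET(flags, bit)  (((flags) & (bit)) == (bit))
#define BTOA(b)             ((b)? "true" : "false")
#define BV(flags, bit)      BTOA(IS_SET((flags), (bit)))

int
main(void)
{
    const uint64_t fencing_enabled = (1ULL << 0);  // illustrative bit value
    uint64_t scheduler_flags = fencing_enabled;

    printf("stonith-enabled=\"%s\"\n", BV(scheduler_flags, fencing_enabled));
    return 0;
}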
@@ -1288,8 +1333,8 @@ failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
NULL);
}
- pcmk__g_strcat(str, crm_action_str(task, interval_ms), " on ", node_name,
- NULL);
+ pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ",
+ node_name, NULL);
if (status == PCMK_EXEC_DONE) {
pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
@@ -1496,11 +1541,11 @@ failed_action_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
+PCMK__OUTPUT_ARGS("failed-action-list", "pcmk_scheduler_t *", "GList *",
"GList *", "uint32_t", "bool")
static int
failed_action_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -1509,11 +1554,11 @@ failed_action_list(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
- if (xmlChildElementCount(data_set->failed) == 0) {
+ if (xmlChildElementCount(scheduler->failed) == 0) {
return rc;
}
- for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
+ for (xml_op = pcmk__xml_first_child(scheduler->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
char *rsc = NULL;
@@ -1546,7 +1591,7 @@ failed_action_list(pcmk__output_t *out, va_list args) {
}
static void
-status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
+status_node(pcmk_node_t *node, xmlNodePtr parent, uint32_t show_opts)
{
int health = pe__node_health(node);
@@ -1598,11 +1643,11 @@ status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
}
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool",
"GList *", "GList *")
static int
node_html(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1641,7 +1686,7 @@ node_html(pcmk__output_t *out, va_list args) {
status_node(node, item_node, show_opts);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
show_opts |= pcmk_show_rsc_only;
@@ -1679,7 +1724,7 @@ node_html(pcmk__output_t *out, va_list args) {
* \return String representation of node's status
*/
static const char *
-node_text_status(const pe_node_t *node)
+node_text_status(const pcmk_node_t *node)
{
if (node->details->unclean) {
if (node->details->online) {
@@ -1723,10 +1768,11 @@ node_text_status(const pe_node_t *node)
return "OFFLINE";
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_text(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1784,7 +1830,7 @@ node_text(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter2->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1809,10 +1855,11 @@ node_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1826,10 +1873,10 @@ node_xml(pcmk__output_t *out, va_list args) {
const char *feature_set;
switch (node->details->type) {
- case node_member:
+ case pcmk_node_variant_cluster:
node_type = "member";
break;
- case node_remote:
+ case pcmk_node_variant_remote:
node_type = "remote";
break;
case node_ping:
@@ -1873,7 +1920,7 @@ node_xml(pcmk__output_t *out, va_list args) {
GList *lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1959,13 +2006,13 @@ node_attribute_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
gchar *node_str = NULL;
char *last_change_str = NULL;
@@ -1976,10 +2023,10 @@ node_and_op(pcmk__output_t *out, va_list args) {
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, PCMK_EXEC_UNKNOWN);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
@@ -2014,13 +2061,13 @@ node_and_op(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *op_rsc = crm_element_value(xml_op, "resource");
int status;
time_t last_change = 0;
@@ -2036,7 +2083,7 @@ node_and_op_xml(pcmk__output_t *out, va_list args) {
"status", pcmk_exec_status_str(status),
NULL);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -2086,11 +2133,11 @@ node_attribute_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
+PCMK__OUTPUT_ARGS("node-attribute-list", "pcmk_scheduler_t *", "uint32_t",
"bool", "GList *", "GList *")
static int
node_attribute_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2099,8 +2146,8 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Display each node's attributes */
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
GList *attr_list = NULL;
GHashTableIter iter;
@@ -2137,7 +2184,7 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
value = pe_node_attribute_raw(node, name);
add_extra = add_extra_info(node, node->details->running_rsc,
- data_set, name, &expected_score);
+ scheduler, name, &expected_score);
/* Print attribute name and value */
out->message(out, "node-attribute", name, value, add_extra,
@@ -2152,11 +2199,11 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s capacity:",
@@ -2169,11 +2216,11 @@ node_capacity(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity_xml(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
@@ -2185,12 +2232,12 @@ node_capacity_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
- "GList *", "GList *", "uint32_t", "uint32_t")
+PCMK__OUTPUT_ARGS("node-history-list", "pcmk_scheduler_t *", "pcmk_node_t *",
+ "xmlNodePtr", "GList *", "GList *", "uint32_t", "uint32_t")
static int
node_history_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
xmlNode *node_state = va_arg(args, xmlNode *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -2208,8 +2255,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
- pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
/* We can't use is_filtered here to filter group resources. For is_filtered,
* we have to decide whether to check the parent or not. If we check the
@@ -2219,7 +2266,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
*
* For other resource types, is_filtered is okay.
*/
- if (parent->variant == pe_group) {
+ if (parent->variant == pcmk_rsc_variant_group) {
if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
pcmk__str_star_matches)
&& !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
@@ -2234,8 +2281,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
if (failcount <= 0) {
continue;
@@ -2251,7 +2298,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
failcount, last_failure, false);
} else {
GList *op_list = get_operation_list(rsc_entry);
- pe_resource_t *rsc = pe_find_resource(data_set->resources,
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
crm_element_value(rsc_entry, XML_ATTR_ID));
if (op_list == NULL) {
@@ -2264,7 +2311,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
only_rsc);
}
- out->message(out, "resource-operation-list", data_set, rsc, node,
+ out->message(out, "resource-operation-list", scheduler, rsc, node,
op_list, show_opts);
}
}
@@ -2285,7 +2332,7 @@ node_list_html(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2320,7 +2367,7 @@ node_list_text(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
if (!pcmk__str_in_list(node->details->uname, only_node,
@@ -2416,7 +2463,7 @@ node_list_xml(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "nodes");
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2430,11 +2477,11 @@ node_list_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
+PCMK__OUTPUT_ARGS("node-summary", "pcmk_scheduler_t *", "GList *", "GList *",
"uint32_t", "uint32_t", "bool")
static int
node_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t section_opts = va_arg(args, uint32_t);
@@ -2442,7 +2489,7 @@ node_summary(pcmk__output_t *out, va_list args) {
bool print_spacer = va_arg(args, int);
xmlNode *node_state = NULL;
- xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
+ xmlNode *cib_status = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_STATUS);
int rc = pcmk_rc_no_output;
@@ -2452,7 +2499,7 @@ node_summary(pcmk__output_t *out, va_list args) {
for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
node_state != NULL; node_state = crm_next_same_xml(node_state)) {
- pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
@@ -2466,7 +2513,7 @@ node_summary(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
- out->message(out, "node-history-list", data_set, node, node_state,
+ out->message(out, "node-history-list", scheduler, node, node_state,
only_node, only_rsc, section_opts, show_opts);
}
@@ -2474,12 +2521,12 @@ node_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2494,12 +2541,12 @@ node_weight(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight_xml(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2587,12 +2634,13 @@ op_history_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
out->list_item(out, NULL, "%s promotion score on %s: %s",
@@ -2602,12 +2650,13 @@ promotion_score(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
@@ -2622,10 +2671,10 @@ promotion_score_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2636,10 +2685,10 @@ resource_config(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2651,10 +2700,11 @@ resource_config_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2673,10 +2723,11 @@ resource_history_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2733,12 +2784,12 @@ print_resource_header(pcmk__output_t *out, uint32_t show_opts)
}
-PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("resource-list", "pcmk_scheduler_t *", "uint32_t", "bool",
"GList *", "GList *", "bool")
static int
resource_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_summary = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2759,8 +2810,9 @@ resource_list(pcmk__output_t *out, va_list args)
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
- if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
- GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+ if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *rscs = pe__filter_rsc_list(scheduler->resources, only_rsc);
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
@@ -2771,8 +2823,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* For each resource, display it if appropriate */
- for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
+ for (rsc_iter = scheduler->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
@@ -2780,7 +2832,7 @@ resource_list(pcmk__output_t *out, va_list args)
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
@@ -2790,7 +2842,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* Skip primitives already counted in a brief summary */
- } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
+ } else if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && (rsc->variant == pcmk_rsc_variant_primitive)) {
continue;
/* Skip resources that aren't at least partially active,
@@ -2840,14 +2893,15 @@ resource_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
- "pe_node_t *", "GList *", "uint32_t")
+PCMK__OUTPUT_ARGS("resource-operation-list", "pcmk_scheduler_t *",
+ "pcmk_resource_t *", "pcmk_node_t *", "GList *", "uint32_t")
static int
resource_operation_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler G_GNUC_UNUSED = va_arg(args,
+ pcmk_scheduler_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
GList *op_list = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -2866,7 +2920,7 @@ resource_operation_list(pcmk__output_t *out, va_list args)
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
@@ -2874,8 +2928,8 @@ resource_operation_list(pcmk__output_t *out, va_list args)
/* If this is the first printed operation, print heading for resource */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
failcount, last_failure, true);
@@ -2894,12 +2948,13 @@ resource_operation_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
@@ -2912,12 +2967,13 @@ resource_util(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
@@ -2930,10 +2986,10 @@ resource_util_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_html(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2952,10 +3008,10 @@ ticket_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_text(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2974,10 +3030,10 @@ ticket_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_xml(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
xmlNodePtr node = NULL;
@@ -2997,16 +3053,16 @@ ticket_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
+PCMK__OUTPUT_ARGS("ticket-list", "pcmk_scheduler_t *", "bool")
static int
ticket_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
bool print_spacer = va_arg(args, int);
GHashTableIter iter;
gpointer key, value;
- if (g_hash_table_size(data_set->tickets) == 0) {
+ if (g_hash_table_size(scheduler->tickets) == 0) {
return pcmk_rc_no_output;
}
@@ -3016,9 +3072,9 @@ ticket_list(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
- g_hash_table_iter_init(&iter, data_set->tickets);
+ g_hash_table_iter_init(&iter, scheduler->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
- pe_ticket_t *ticket = (pe_ticket_t *) value;
+ pcmk_ticket_t *ticket = (pcmk_ticket_t *) value;
out->message(out, "ticket", ticket);
}
diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h
index ae8d131..bb0ee4e 100644
--- a/lib/pengine/pe_status_private.h
+++ b/lib/pengine/pe_status_private.h
@@ -19,6 +19,11 @@
#define G_GNUC_INTERNAL
#endif
+#include <glib.h> // GSList, GList, GHashTable
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/pengine/status.h> // pcmk_action_t, pcmk_resource_t, etc.
+
/*!
* \internal
* \deprecated This macro will be removed in a future release
@@ -43,10 +48,10 @@ typedef struct notify_data_s {
const char *action;
- pe_action_t *pre;
- pe_action_t *post;
- pe_action_t *pre_done;
- pe_action_t *post_done;
+ pcmk_action_t *pre;
+ pcmk_action_t *post;
+ pcmk_action_t *pre_done;
+ pcmk_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
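The four pseudo-actions in this struct bracket the real action. The sequencing below is assumed from the field names and from how clone notifications are ordered elsewhere in the scheduler: pre-notifies run first, a barrier confirms them, the real action runs, then post-notifies and their barrier. A runnable sketch of that ordering:

#include <stdio.h>

// Illustrative only: the assumed sequencing of the four pseudo-actions
// around the real action (e.g. a clone-wide stop).
int
main(void)
{
    const char *steps[] = {
        "pre",        // pre-notify pseudo-action (n_data->pre)
        "pre_done",   // barrier once all pre-notifies are confirmed
        "action",     // the underlying start/stop/promote/demote
        "post",       // post-notify pseudo-action (n_data->post)
        "post_done",  // barrier once all post-notifies are confirmed
    };

    for (int i = 0; i < 5; i++) {
        printf("%d. %s\n", i + 1, steps[i]);
    }
    return 0;
}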
@@ -60,62 +65,86 @@ typedef struct notify_data_s {
} notify_data_t;
G_GNUC_INTERNAL
-pe_resource_t *pe__create_clone_child(pe_resource_t *rsc,
- pe_working_set_t *data_set);
+pcmk_resource_t *pe__create_clone_child(pcmk_resource_t *rsc,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data);
+void pe__create_action_notifications(pcmk_resource_t *rsc,
+ notify_data_t *n_data);
G_GNUC_INTERNAL
void pe__free_action_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
-notify_data_t *pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action,
- pe_action_t *complete);
+notify_data_t *pe__action_notif_pseudo_ops(pcmk_resource_t *rsc,
+ const char *task,
+ pcmk_action_t *action,
+ pcmk_action_t *complete);
G_GNUC_INTERNAL
-void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set);
+void pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
-gboolean pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set);
+gboolean pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_remote_nodes(xmlNode *xml_resources, pe_working_set_t *data_set);
+gboolean unpack_remote_nodes(xmlNode *xml_resources,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_resources(const xmlNode *xml_resources,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set);
+gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_nodes(xmlNode *xml_nodes, pe_working_set_t *data_set);
+gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_tags(xmlNode *xml_tags, pe_working_set_t *data_set);
+gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_status(xmlNode *status, pe_working_set_t *data_set);
+gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-op_digest_cache_t *pe__compare_fencing_digest(pe_resource_t *rsc,
+op_digest_cache_t *pe__compare_fencing_digest(pcmk_resource_t *rsc,
const char *agent,
- pe_node_t *node,
- pe_working_set_t *data_set);
+ pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler);
+
+G_GNUC_INTERNAL
+void pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler);
+
+// Primitive resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__primitive_max_per_node(const pcmk_resource_t *rsc);
+
+// Group resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__group_max_per_node(const pcmk_resource_t *rsc);
+
+// Clone resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__clone_max_per_node(const pcmk_resource_t *rsc);
+
+// Bundle resource methods
G_GNUC_INTERNAL
-void pe__unpack_node_health_scores(pe_working_set_t *data_set);
+pcmk_node_t *pe__bundle_active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
G_GNUC_INTERNAL
-pe_node_t *pe__bundle_active_node(const pe_resource_t *rsc,
- unsigned int *count_all,
- unsigned int *count_clean);
+unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc);
#endif // PE_STATUS_PRIVATE__H
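The new per-variant declarations above (pe__primitive_max_per_node(), pe__group_max_per_node(), pe__clone_max_per_node(), pe__bundle_max_per_node()) suggest a dispatch-by-variant entry point in the callers. A sketch of that idea under assumed names; the enum, struct, stub bodies, and return values here are placeholders rather than Pacemaker's real types:

#include <assert.h>

/* Illustrative variant tags; the real enum lives in Pacemaker's headers */
enum rsc_kind { KIND_PRIMITIVE, KIND_GROUP, KIND_CLONE, KIND_BUNDLE };

struct rsc {
    enum rsc_kind kind;
};

/* Hypothetical per-variant implementations (stub values for illustration) */
static unsigned int primitive_max_per_node(const struct rsc *r) { return 1; }
static unsigned int group_max_per_node(const struct rsc *r) { return 1; }
static unsigned int clone_max_per_node(const struct rsc *r) { return 2; }
static unsigned int bundle_max_per_node(const struct rsc *r) { return 1; }

/* One public entry point fanning out by variant, as the header suggests */
unsigned int
max_per_node(const struct rsc *r)
{
    assert(r != NULL);
    switch (r->kind) {
        case KIND_PRIMITIVE: return primitive_max_per_node(r);
        case KIND_GROUP:     return group_max_per_node(r);
        case KIND_CLONE:     return clone_max_per_node(r);
        case KIND_BUNDLE:    return bundle_max_per_node(r);
    }
    return 0;
}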
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 769635f..6b5058c 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013-2022 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,41 +10,41 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include <glib.h>
bool
-pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__resource_is_remote_conn(const pcmk_resource_t *rsc)
{
return (rsc != NULL) && rsc->is_remote_node
- && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
+ && pe__is_remote_node(pe_find_node(rsc->cluster->nodes, rsc->id));
}
bool
-pe__is_remote_node(const pe_node_t *node)
+pe__is_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& ((node->details->remote_rsc == NULL)
|| (node->details->remote_rsc->container == NULL));
}
bool
-pe__is_guest_node(const pe_node_t *node)
+pe__is_guest_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container != NULL);
}
bool
-pe__is_guest_or_remote_node(const pe_node_t *node)
+pe__is_guest_or_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote);
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote);
}
bool
-pe__is_bundle_node(const pe_node_t *node)
+pe__is_bundle_node(const pcmk_node_t *node)
{
return pe__is_guest_node(node)
&& pe_rsc_is_bundled(node->details->remote_rsc);
@@ -57,20 +57,20 @@ pe__is_bundle_node(const pe_node_t *node)
* If a given resource contains a filler resource that is a remote connection,
* return that filler resource (or NULL if none is found).
*
- * \param[in] data_set Working set of cluster
- * \param[in] rsc Resource to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] rsc Resource to check
*
* \return Filler resource with remote connection, or NULL if none found
*/
-pe_resource_t *
-pe__resource_contains_guest_node(const pe_working_set_t *data_set,
- const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_resource_t *rsc)
{
- if ((rsc != NULL) && (data_set != NULL)
- && pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ if ((rsc != NULL) && (scheduler != NULL)
+ && pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = gIter->data;
+ pcmk_resource_t *filler = gIter->data;
if (filler->is_remote_node) {
return filler;
@@ -111,26 +111,28 @@ xml_contains_remote_node(xmlNode *xml)
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
- * \param[in] data_set Working set for cluster
+ * \param[in] scheduler Scheduler data
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
-pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
- void (*helper)(const pe_node_t*, void*), void *user_data)
+pe_foreach_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_node_t *host,
+ void (*helper)(const pcmk_node_t*, void*),
+ void *user_data)
{
GList *iter;
- CRM_CHECK(data_set && host && host->details && helper, return);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ CRM_CHECK(scheduler && host && host->details && helper, return);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node && (rsc->container != NULL)) {
- pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
+ pcmk_node_t *guest_node = pe_find_node(scheduler->nodes, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
@@ -203,29 +205,30 @@ pe_create_remote_xml(xmlNode *parent, const char *uname,
// Add operations
xml_sub = create_xml_node(remote, "operations");
- crm_create_op_xml(xml_sub, uname, "monitor", "30s", "30s");
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_MONITOR, "30s", "30s");
if (start_timeout) {
- crm_create_op_xml(xml_sub, uname, "start", "0", start_timeout);
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_START, "0",
+ start_timeout);
}
return remote;
}
// History entry to be checked for fail count clearing
struct check_op {
- const xmlNode *rsc_op; // History entry XML
- pe_resource_t *rsc; // Known resource corresponding to history entry
- pe_node_t *node; // Known node corresponding to history entry
- enum pe_check_parameters check_type; // What needs checking
+ const xmlNode *rsc_op; // History entry XML
+ pcmk_resource_t *rsc; // Known resource corresponding to history entry
+ pcmk_node_t *node; // Known node corresponding to history entry
+ enum pcmk__check_parameters check_type; // What needs checking
};
void
-pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
- pe_node_t *node, enum pe_check_parameters flag,
- pe_working_set_t *data_set)
+pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters flag,
+ pcmk_scheduler_t *scheduler)
{
struct check_op *check_op = NULL;
- CRM_CHECK(data_set && rsc_op && rsc && node, return);
+ CRM_CHECK(scheduler && rsc_op && rsc && node, return);
check_op = calloc(1, sizeof(struct check_op));
CRM_ASSERT(check_op != NULL);
@@ -235,24 +238,25 @@ pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
check_op->rsc = rsc;
check_op->node = node;
check_op->check_type = flag;
- data_set->param_check = g_list_prepend(data_set->param_check, check_op);
+ scheduler->param_check = g_list_prepend(scheduler->param_check, check_op);
}
/*!
* \internal
* \brief Call a function for each action to be checked for addr substitution
*
- * \param[in,out] data_set Working set for cluster
- * \param[in] cb Function to be called
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] cb Function to be called
*/
void
-pe__foreach_param_check(pe_working_set_t *data_set,
- void (*cb)(pe_resource_t*, pe_node_t*, const xmlNode*,
- enum pe_check_parameters))
+pe__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
+ const xmlNode*, enum pcmk__check_parameters))
{
- CRM_CHECK(data_set && cb, return);
+ CRM_CHECK(scheduler && cb, return);
- for (GList *item = data_set->param_check; item != NULL; item = item->next) {
+ for (GList *item = scheduler->param_check;
+ item != NULL; item = item->next) {
struct check_op *check_op = item->data;
cb(check_op->rsc, check_op->node, check_op->rsc_op,
@@ -261,10 +265,10 @@ pe__foreach_param_check(pe_working_set_t *data_set,
}
void
-pe__free_param_checks(pe_working_set_t *data_set)
+pe__free_param_checks(pcmk_scheduler_t *scheduler)
{
- if (data_set && data_set->param_check) {
- g_list_free_full(data_set->param_check, free);
- data_set->param_check = NULL;
+ if (scheduler && scheduler->param_check) {
+ g_list_free_full(scheduler->param_check, free);
+ scheduler->param_check = NULL;
}
}
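pe__add_param_check(), pe__foreach_param_check(), and pe__free_param_checks() above form a small register/iterate/drain lifecycle around a GList. A reduced sketch of the same lifecycle; struct pending_check and these function names are illustrative, not the internal API:

#include <glib.h>

/* Simplified stand-in for struct check_op (fields illustrative) */
struct pending_check {
    const char *id;
};

/* Register: prepend is O(1), so iteration order is reverse of registration */
static void
add_check(GList **checks, const char *id)
{
    struct pending_check *c = g_new0(struct pending_check, 1);

    c->id = id;
    *checks = g_list_prepend(*checks, c);
}

/* Iterate: hand each entry to a caller-supplied callback */
static void
foreach_check(GList *checks, void (*cb)(struct pending_check *))
{
    for (GList *item = checks; item != NULL; item = item->next) {
        cb(item->data);
    }
}

/* Drain: free all entries and reset the head, as pe__free_param_checks()
 * does with scheduler->param_check */
static void
free_checks(GList **checks)
{
    g_list_free_full(*checks, g_free);
    *checks = NULL;
}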
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 7021d3c..50f9f64 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -41,7 +41,7 @@ pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -104,25 +104,23 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
enum expression_type
find_expression_type(xmlNode * expr)
{
- const char *tag = NULL;
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
- tag = crm_element_name(expr);
- if (pcmk__str_eq(tag, PCMK_XE_DATE_EXPRESSION, pcmk__str_none)) {
+ if (pcmk__xe_is(expr, PCMK_XE_DATE_EXPRESSION)) {
return time_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_RSC_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_RSC_EXPRESSION)) {
return rsc_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_OP_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_OP_EXPRESSION)) {
return op_expr;
- } else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, XML_TAG_RULE)) {
return nested_rule;
- } else if (!pcmk__str_eq(tag, XML_TAG_EXPRESSION, pcmk__str_none)) {
+ } else if (!pcmk__xe_is(expr, XML_TAG_EXPRESSION)) {
return not_expr;
} else if (pcmk__str_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
@@ -320,6 +318,7 @@ typedef struct sorted_set_s {
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
+ gboolean overwrite; // Whether existing values will be overwritten
} sorted_set_t;
static gint
@@ -343,10 +342,14 @@ sort_pairs(gconstpointer a, gconstpointer b)
return 1;
}
+ /* If we're overwriting values, we want lowest score first, so the highest
+ * score is processed last; if we're not overwriting values, we want highest
+ * score first, so nothing else overwrites it.
+ */
if (pair_a->score < pair_b->score) {
- return 1;
+ return pair_a->overwrite? -1 : 1;
} else if (pair_a->score > pair_b->score) {
- return -1;
+ return pair_a->overwrite? 1 : -1;
}
return 0;
}
@@ -360,8 +363,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
- name = crm_element_name(list->children);
- if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
+ if (pcmk__xe_is(list->children, XML_TAG_ATTRS)) {
list = list->children;
}
@@ -446,7 +448,7 @@ unpack_attr_set(gpointer data, gpointer user_data)
*/
static GList *
make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
- const char *always_first)
+ const char *always_first, gboolean overwrite)
{
GList *unsorted = NULL;
@@ -471,6 +473,7 @@ make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
pair->name = ID(expanded_attr_set);
pair->special_name = always_first;
pair->attr_set = expanded_attr_set;
+ pair->overwrite = overwrite;
score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
@@ -499,7 +502,7 @@ pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
- GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
+ GList *pairs = make_pairs(top, xml_obj, set_name, always_first, overwrite);
if (pairs) {
unpack_data_t data = {
@@ -536,7 +539,7 @@ pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -1161,7 +1164,7 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
const char *op = NULL;
const char *value = NULL;
- if (rule_data->role == RSC_ROLE_UNKNOWN) {
+ if (rule_data->role == pcmk_role_unknown) {
return accept;
}
@@ -1169,13 +1172,13 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
- if (rule_data->role > RSC_ROLE_STARTED) {
+ if (rule_data->role > pcmk_role_started) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = TRUE;
}
@@ -1186,8 +1189,8 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
@@ -1301,7 +1304,7 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
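The sort_pairs() change above is easiest to see with concrete scores: with overwrite set, ascending order makes the highest score the last one applied, so it wins by overwriting; without it, descending order makes the highest score land first, so later entries never replace it. A runnable demonstration of that comparator logic, with a reduced struct pair standing in for sorted_set_t:

#include <glib.h>
#include <stdio.h>

/* Reduced pair: just a score and the overwrite policy (illustrative) */
struct pair {
    int score;
    gboolean overwrite;
};

/* Same idea as sort_pairs() above: flip the ordering on 'overwrite' so the
 * highest score always ends up being the effective value */
static gint
cmp_pairs(gconstpointer a, gconstpointer b)
{
    const struct pair *pa = a;
    const struct pair *pb = b;

    if (pa->score < pb->score) {
        return pa->overwrite? -1 : 1;
    } else if (pa->score > pb->score) {
        return pa->overwrite? 1 : -1;
    }
    return 0;
}

int
main(void)
{
    struct pair p1 = { 10, TRUE };
    struct pair p2 = { 20, TRUE };
    GList *pairs = NULL;

    pairs = g_list_prepend(pairs, &p2);
    pairs = g_list_prepend(pairs, &p1);
    pairs = g_list_sort(pairs, cmp_pairs);

    /* overwrite=TRUE: ascending order, so score 20 is applied last and wins;
     * with overwrite=FALSE the sort would be descending instead */
    for (GList *i = pairs; i != NULL; i = i->next) {
        printf("%d\n", ((struct pair *) i->data)->score);
    }
    g_list_free(pairs);
    return 0;
}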
diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c
index 073b0c1..9eed7ff 100644
--- a/lib/pengine/rules_alerts.c
+++ b/lib/pengine/rules_alerts.c
@@ -123,21 +123,16 @@ unpack_alert_filter(xmlNode *basenode, pcmk__alert_t *entry)
for (event_type = pcmk__xe_first_child(select); event_type != NULL;
event_type = pcmk__xe_next(event_type)) {
- const char *tagname = crm_element_name(event_type);
-
- if (tagname == NULL) {
- continue;
-
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_FENCING)) {
+ if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_FENCING)) {
flags |= pcmk__alert_fencing;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_NODES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_NODES)) {
flags |= pcmk__alert_node;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_RESOURCES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_RESOURCES)) {
flags |= pcmk__alert_resource;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
xmlNode *attr;
const char *attr_name;
int nattrs = 0;
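The conversions above replace the crm_element_name()-plus-strcmp chains with single pcmk__xe_is() calls, which also absorbs the explicit NULL guard. A sketch of what such a NULL-safe tag test looks like over libxml2; this is an approximation for illustration only, since the real pcmk__xe_is() lives in libcrmcommon and may differ in detail:

#include <libxml/tree.h>
#include <string.h>
#include <stdbool.h>

/* NULL-safe "is this XML element named X?" test: a NULL node, nameless
 * node, or NULL name simply fails the match instead of crashing */
static bool
xe_is(const xmlNode *node, const char *name)
{
    return (node != NULL) && (node->name != NULL) && (name != NULL)
           && (strcmp((const char *) node->name, name) == 0);
}

With a helper of this shape, each branch above collapses from a guard plus a strcmp() into one boolean call, which is what lets the diff drop the tagname variable entirely.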
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index b1144eb..e6ec237 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -21,38 +21,38 @@
#include <pe_status_private.h>
/*!
- * \brief Create a new working set
+ * \brief Create a new object to hold scheduler data
*
- * \return New, initialized working set on success, else NULL (and set errno)
- * \note Only pe_working_set_t objects created with this function (as opposed
+ * \return New, initialized scheduler data on success, else NULL (and set errno)
+ * \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
-pe_working_set_t *
+pcmk_scheduler_t *
pe_new_working_set(void)
{
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- if (data_set != NULL) {
- set_working_set_defaults(data_set);
+ if (scheduler != NULL) {
+ set_working_set_defaults(scheduler);
}
- return data_set;
+ return scheduler;
}
/*!
- * \brief Free a working set
+ * \brief Free scheduler data
*
- * \param[in,out] data_set Working set to free
+ * \param[in,out] scheduler Scheduler data to free
*/
void
-pe_free_working_set(pe_working_set_t *data_set)
+pe_free_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set != NULL) {
- pe_reset_working_set(data_set);
- data_set->priv = NULL;
- free(data_set);
+ if (scheduler != NULL) {
+ pe_reset_working_set(scheduler);
+ scheduler->priv = NULL;
+ free(scheduler);
}
}
@@ -68,105 +68,105 @@ pe_free_working_set(pe_working_set_t *data_set)
* - A list of the possible stop/start actions (without dependencies)
*/
gboolean
-cluster_status(pe_working_set_t * data_set)
+cluster_status(pcmk_scheduler_t * scheduler)
{
xmlNode *section = NULL;
- if ((data_set == NULL) || (data_set->input == NULL)) {
+ if ((scheduler == NULL) || (scheduler->input == NULL)) {
return FALSE;
}
crm_trace("Beginning unpack");
- if (data_set->failed != NULL) {
- free_xml(data_set->failed);
+ if (scheduler->failed != NULL) {
+ free_xml(scheduler->failed);
}
- data_set->failed = create_xml_node(NULL, "failed-ops");
+ scheduler->failed = create_xml_node(NULL, "failed-ops");
- if (data_set->now == NULL) {
- data_set->now = crm_time_new(NULL);
+ if (scheduler->now == NULL) {
+ scheduler->now = crm_time_new(NULL);
}
- if (data_set->dc_uuid == NULL) {
- data_set->dc_uuid = crm_element_value_copy(data_set->input,
- XML_ATTR_DC_UUID);
+ if (scheduler->dc_uuid == NULL) {
+ scheduler->dc_uuid = crm_element_value_copy(scheduler->input,
+ XML_ATTR_DC_UUID);
}
- if (pcmk__xe_attr_is_true(data_set->input, XML_ATTR_HAVE_QUORUM)) {
- pe__set_working_set_flags(data_set, pe_flag_have_quorum);
+ if (pcmk__xe_attr_is_true(scheduler->input, XML_ATTR_HAVE_QUORUM)) {
+ pe__set_working_set_flags(scheduler, pcmk_sched_quorate);
} else {
- pe__clear_working_set_flags(data_set, pe_flag_have_quorum);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_quorate);
}
- data_set->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
- data_set->input, LOG_NEVER);
- data_set->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
- data_set->input, LOG_NEVER);
+ scheduler->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
+ scheduler->input, LOG_NEVER);
+ scheduler->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
+ scheduler->input, LOG_NEVER);
- section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, scheduler->input,
LOG_TRACE);
- unpack_config(section, data_set);
+ unpack_config(section, scheduler);
- if (!pcmk_any_flags_set(data_set->flags,
- pe_flag_quick_location|pe_flag_have_quorum)
- && (data_set->no_quorum_policy != no_quorum_ignore)) {
+ if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_location_only|pcmk_sched_quorate)
+ && (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
crm_warn("Fencing and resource management disabled due to lack of quorum");
}
- section = get_xpath_object("//" XML_CIB_TAG_NODES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_NODES, scheduler->input,
LOG_TRACE);
- unpack_nodes(section, data_set);
+ unpack_nodes(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, scheduler->input,
LOG_TRACE);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- unpack_remote_nodes(section, data_set);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ unpack_remote_nodes(section, scheduler);
}
- unpack_resources(section, data_set);
+ unpack_resources(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_TAGS, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_TAGS, scheduler->input,
LOG_NEVER);
- unpack_tags(section, data_set);
+ unpack_tags(section, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- section = get_xpath_object("//"XML_CIB_TAG_STATUS, data_set->input,
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ section = get_xpath_object("//"XML_CIB_TAG_STATUS, scheduler->input,
LOG_TRACE);
- unpack_status(section, data_set);
+ unpack_status(section, scheduler);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_no_counts)) {
- for (GList *item = data_set->resources; item != NULL;
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_no_counts)) {
+ for (GList *item = scheduler->resources; item != NULL;
item = item->next) {
- ((pe_resource_t *) (item->data))->fns->count(item->data);
+ ((pcmk_resource_t *) (item->data))->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
- pe__set_working_set_flags(data_set, pe_flag_have_status);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_status);
return TRUE;
}
/*!
* \internal
- * \brief Free a list of pe_resource_t
+ * \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
- * \note When a working set's resource list is freed, that includes the original
+ * \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
- * working set's node list, so take care not to use those afterward.
- * \todo Refactor pe_node_t to strdup() the node name.
+ * scheduler's node list, so take care not to use those afterward.
+ * \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
- rsc = (pe_resource_t *) iterator->data;
+ rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->fns->free(rsc);
}
@@ -193,7 +193,7 @@ static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
- pe_node_t *node = (pe_node_t *) iterator->data;
+ pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
@@ -268,140 +268,140 @@ pe__free_location(GList *constraints)
}
/*!
- * \brief Reset working set to default state without freeing it or constraints
+ * \brief Reset scheduler data to defaults without freeing it or constraints
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
-cleanup_calculations(pe_working_set_t * data_set)
+cleanup_calculations(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
- pe__clear_working_set_flags(data_set, pe_flag_have_status);
- if (data_set->config_hash != NULL) {
- g_hash_table_destroy(data_set->config_hash);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_have_status);
+ if (scheduler->config_hash != NULL) {
+ g_hash_table_destroy(scheduler->config_hash);
}
- if (data_set->singletons != NULL) {
- g_hash_table_destroy(data_set->singletons);
+ if (scheduler->singletons != NULL) {
+ g_hash_table_destroy(scheduler->singletons);
}
- if (data_set->tickets) {
- g_hash_table_destroy(data_set->tickets);
+ if (scheduler->tickets) {
+ g_hash_table_destroy(scheduler->tickets);
}
- if (data_set->template_rsc_sets) {
- g_hash_table_destroy(data_set->template_rsc_sets);
+ if (scheduler->template_rsc_sets) {
+ g_hash_table_destroy(scheduler->template_rsc_sets);
}
- if (data_set->tags) {
- g_hash_table_destroy(data_set->tags);
+ if (scheduler->tags) {
+ g_hash_table_destroy(scheduler->tags);
}
- free(data_set->dc_uuid);
+ free(scheduler->dc_uuid);
crm_trace("deleting resources");
- pe_free_resources(data_set->resources);
+ pe_free_resources(scheduler->resources);
crm_trace("deleting actions");
- pe_free_actions(data_set->actions);
+ pe_free_actions(scheduler->actions);
crm_trace("deleting nodes");
- pe_free_nodes(data_set->nodes);
+ pe_free_nodes(scheduler->nodes);
- pe__free_param_checks(data_set);
- g_list_free(data_set->stop_needed);
- free_xml(data_set->graph);
- crm_time_free(data_set->now);
- free_xml(data_set->input);
- free_xml(data_set->failed);
+ pe__free_param_checks(scheduler);
+ g_list_free(scheduler->stop_needed);
+ free_xml(scheduler->graph);
+ crm_time_free(scheduler->now);
+ free_xml(scheduler->input);
+ free_xml(scheduler->failed);
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- CRM_CHECK(data_set->ordering_constraints == NULL,;
+ CRM_CHECK(scheduler->ordering_constraints == NULL,;
);
- CRM_CHECK(data_set->placement_constraints == NULL,;
+ CRM_CHECK(scheduler->placement_constraints == NULL,;
);
}
/*!
- * \brief Reset a working set to default state without freeing it
+ * \brief Reset scheduler data to default state without freeing it
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*/
void
-pe_reset_working_set(pe_working_set_t *data_set)
+pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
- g_list_length(data_set->ordering_constraints));
- pe__free_ordering(data_set->ordering_constraints);
- data_set->ordering_constraints = NULL;
+ g_list_length(scheduler->ordering_constraints));
+ pe__free_ordering(scheduler->ordering_constraints);
+ scheduler->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
- g_list_length(data_set->placement_constraints));
- pe__free_location(data_set->placement_constraints);
- data_set->placement_constraints = NULL;
+ g_list_length(scheduler->placement_constraints));
+ pe__free_location(scheduler->placement_constraints);
+ scheduler->placement_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
- g_list_length(data_set->colocation_constraints));
- g_list_free_full(data_set->colocation_constraints, free);
- data_set->colocation_constraints = NULL;
+ g_list_length(scheduler->colocation_constraints));
+ g_list_free_full(scheduler->colocation_constraints, free);
+ scheduler->colocation_constraints = NULL;
crm_trace("Deleting %d ticket constraints",
- g_list_length(data_set->ticket_constraints));
- g_list_free_full(data_set->ticket_constraints, free);
- data_set->ticket_constraints = NULL;
+ g_list_length(scheduler->ticket_constraints));
+ g_list_free_full(scheduler->ticket_constraints, free);
+ scheduler->ticket_constraints = NULL;
- cleanup_calculations(data_set);
+ cleanup_calculations(scheduler);
}
void
-set_working_set_defaults(pe_working_set_t * data_set)
+set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
- void *priv = data_set->priv;
+ void *priv = scheduler->priv;
- memset(data_set, 0, sizeof(pe_working_set_t));
+ memset(scheduler, 0, sizeof(pcmk_scheduler_t));
- data_set->priv = priv;
- data_set->order_id = 1;
- data_set->action_id = 1;
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->priv = priv;
+ scheduler->order_id = 1;
+ scheduler->action_id = 1;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
- data_set->flags = 0x0ULL;
+ scheduler->flags = 0x0ULL;
- pe__set_working_set_flags(data_set,
- pe_flag_stop_rsc_orphans
- |pe_flag_symmetric_cluster
- |pe_flag_stop_action_orphans);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions);
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- pe__set_working_set_flags(data_set, pe_flag_concurrent_fencing);
+ pe__set_working_set_flags(scheduler, pcmk_sched_concurrent_fencing);
}
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
- return pe_find_resource_with_flags(rsc_list, id, pe_find_renamed);
+ return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
- pe_resource_t *parent = rIter->data;
+ pcmk_resource_t *parent = rIter->data;
- pe_resource_t *match =
+ pcmk_resource_t *match =
parent->fns->find_rsc(parent, id, NULL, flags);
if (match != NULL) {
return match;
@@ -414,7 +414,7 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
/*!
* \brief Find a node by name or ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] node_name If not NULL, name of node to find
*
@@ -422,10 +422,10 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
@@ -439,16 +439,16 @@ pe_find_node_any(const GList *nodes, const char *id, const char *uname)
/*!
* \brief Find a node by ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
@@ -464,16 +464,16 @@ pe_find_node_id(const GList *nodes, const char *id)
/*!
* \brief Find a node by name in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if (pcmk__str_eq(node->details->uname, node_name, pcmk__str_casei)) {
return node;
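set_working_set_defaults() above uses a reset idiom worth calling out: save the opaque priv pointer, zero the whole object with memset(), restore the pointer, then re-apply non-zero defaults. A standalone sketch of the same idiom; struct ctx and its fields are illustrative only:

#include <string.h>

/* Illustrative object with one field that must survive a reset */
struct ctx {
    void *priv;                 /* opaque handle owned by the caller */
    int order_id;
    int action_id;
    unsigned long long flags;
};

/* Stash the opaque pointer, zero everything, restore it, set defaults */
static void
reset_ctx(struct ctx *c)
{
    void *priv = c->priv;

    memset(c, 0, sizeof(struct ctx));
    c->priv = priv;
    c->order_id = 1;
    c->action_id = 1;
}

The zero-then-restore order matters: any field not explicitly re-initialized afterward is guaranteed to be zero, so new fields added to the struct later get a sane default for free.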
diff --git a/lib/pengine/tags.c b/lib/pengine/tags.c
index 81c27e4..d8d8ac9 100644
--- a/lib/pengine/tags.c
+++ b/lib/pengine/tags.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2021 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -13,29 +13,30 @@
#include <stdbool.h>
#include <crm/common/util.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
GList *
-pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
const char *id = (const char *) refs->data;
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, id,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ id, flags);
if (!rsc) {
continue;
@@ -48,26 +49,26 @@ pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
GList *
-pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
/* Iterate over the list of node IDs. */
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
/* Find the node that has this ID. */
const char *id = (const char *) refs->data;
- pe_node_t *node = pe_find_node_id(data_set->nodes, id);
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, id);
if (!node) {
continue;
@@ -81,9 +82,10 @@ pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
bool
-pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *tag_name)
+pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc_name,
+ const char *tag_name)
{
- GList *rscs = pe__rscs_with_tag(data_set, tag_name);
+ GList *rscs = pe__rscs_with_tag(scheduler, tag_name);
bool retval = false;
if (rscs == NULL) {
@@ -96,9 +98,10 @@ pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *ta
}
bool
-pe__uname_has_tag(pe_working_set_t *data_set, const char *node_name, const char *tag_name)
+pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node_name,
+ const char *tag_name)
{
- GList *unames = pe__unames_with_tag(data_set, tag_name);
+ GList *unames = pe__unames_with_tag(scheduler, tag_name);
bool retval = false;
if (unames == NULL) {
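pe__rsc_has_tag() and pe__uname_has_tag() above are both membership tests layered on the list-returning lookups. A reduced sketch of that layering, assuming the returned list holds plain strings (as the id references above suggest); the helper name is illustrative:

#include <glib.h>
#include <string.h>
#include <stdbool.h>

/* Membership test over a list of names: obtain (or receive) the list for a
 * tag, then scan it for the wanted entry */
static bool
list_has_name(GList *names, const char *wanted)
{
    for (GList *i = names; i != NULL; i = i->next) {
        if (strcmp((const char *) i->data, wanted) == 0) {
            return true;
        }
    }
    return false;
}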
diff --git a/lib/pengine/tests/Makefile.am b/lib/pengine/tests/Makefile.am
index 4986ef2..48ec5b4 100644
--- a/lib/pengine/tests/Makefile.am
+++ b/lib/pengine/tests/Makefile.am
@@ -1 +1,14 @@
-SUBDIRS = rules native status unpack utils
+#
+# Copyright 2020-2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+SUBDIRS = rules \
+ native \
+ status \
+ unpack \
+ utils
diff --git a/lib/pengine/tests/native/Makefile.am b/lib/pengine/tests/native/Makefile.am
index 5046ff1..07cc1a1 100644
--- a/lib/pengine/tests/native/Makefile.am
+++ b/lib/pengine/tests/native/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -17,6 +17,6 @@ AM_TESTS_ENVIRONMENT += PCMK_CTS_CLI_DIR=$(top_srcdir)/cts/cli
# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = native_find_rsc_test \
- pe_base_name_eq_test
+ pe_base_name_eq_test
TESTS = $(check_PROGRAMS)
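The test file that follows is built around a group-level setup()/teardown() fixture pair that loads a CIB fixture once for all cases. A minimal cmocka harness of the same shape, with an illustrative test case; Pacemaker's own tests layer helpers from unittest_internal.h on top of this, so treat this as the bare-library form:

#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>

static int
setup(void **state)
{
    /* load fixture data here; return nonzero to abort the whole group */
    return 0;
}

static int
teardown(void **state)
{
    /* release fixture data here */
    return 0;
}

static void
a_test(void **state)
{
    assert_int_equal(1 + 1, 2);
}

int
main(void)
{
    const struct CMUnitTest tests[] = {
        cmocka_unit_test(a_test),
    };

    /* setup/teardown run once around the group, as in the file below */
    return cmocka_run_group_tests(tests, setup, teardown);
}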
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
index 22aaf41..b85ca24 100644
--- a/lib/pengine/tests/native/native_find_rsc_test.c
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,21 +10,18 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+#include <crm/common/scheduler.h>
#include <crm/common/xml.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
-
-/* Needed to access replicas inside a bundle. */
-#define PE__VARIANT_BUNDLE 1
-#include <lib/pengine/variant.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_node_t *cluster01, *cluster02, *httpd_bundle_0;
-pe_resource_t *exim_group, *inactive_group, *promotable_clone, *inactive_clone;
-pe_resource_t *httpd_bundle, *mysql_clone_group;
+pcmk_node_t *cluster01, *cluster02, *httpd_bundle_0;
+pcmk_resource_t *exim_group, *inactive_group;
+pcmk_resource_t *promotable_clone, *inactive_clone;
+pcmk_resource_t *httpd_bundle, *mysql_clone_group;
static int
setup(void **state) {
@@ -40,25 +37,26 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to the cluster nodes so we don't have to find them repeatedly. */
- cluster01 = pe_find_node(data_set->nodes, "cluster01");
- cluster02 = pe_find_node(data_set->nodes, "cluster02");
- httpd_bundle_0 = pe_find_node(data_set->nodes, "httpd-bundle-0");
+ cluster01 = pe_find_node(scheduler->nodes, "cluster01");
+ cluster02 = pe_find_node(scheduler->nodes, "cluster02");
+ httpd_bundle_0 = pe_find_node(scheduler->nodes, "httpd-bundle-0");
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "exim-group") == 0) {
exim_group = rsc;
@@ -80,14 +78,14 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
static void
bad_args(void **state) {
- pe_resource_t *rsc = (pe_resource_t *) g_list_first(data_set->resources)->data;
+ pcmk_resource_t *rsc = g_list_first(scheduler->resources)->data;
char *id = rsc->id;
char *name = NULL;
@@ -117,11 +115,11 @@ bad_args(void **state) {
static void
primitive_rsc(void **state) {
- pe_resource_t *dummy = NULL;
+ pcmk_resource_t *dummy = NULL;
/* Find the "dummy" resource, which is the only one with that ID in the set. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -133,20 +131,27 @@ primitive_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0));
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(dummy, "dummy", NULL, pe_find_clone));
- assert_null(native_find_rsc(dummy, "dummy", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because dummy is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(dummy, "dummy", cluster01, pe_find_current));
+ assert_null(native_find_rsc(dummy, "dummy", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(dummy, "dummy", cluster02, 0));
/* Passes because dummy is running on cluster02. */
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", cluster02, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -155,20 +160,27 @@ group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0));
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(exim_group, "exim-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of exim-group's children are running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_current));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0));
/* Passes because one of exim-group's children is running on cluster02. */
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", cluster02, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -177,30 +189,30 @@ inactive_group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_current));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_group,
+ native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-group's children are running. */
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
group_member_rsc(void **state) {
- pe_resource_t *public_ip = NULL;
+ pcmk_resource_t *public_ip = NULL;
/* Find the "Public-IP" resource, a member of "exim-group". */
for (GList *iter = exim_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "Public-IP") == 0) {
public_ip = rsc;
@@ -212,29 +224,36 @@ group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0));
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(public_ip, "Public-IP", NULL, pe_find_clone));
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because Public-IP is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, pe_find_current));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0));
/* Passes because Public-IP is running on cluster02. */
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
inactive_group_member_rsc(void **state) {
- pe_resource_t *inactive_dummy_1 = NULL;
+ pcmk_resource_t *inactive_dummy_1 = NULL;
/* Find the "inactive-dummy-1" resource, a member of "inactive-group". */
for (GList *iter = inactive_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "inactive-dummy-1") == 0) {
inactive_dummy_1 = rsc;
@@ -246,20 +265,21 @@ inactive_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0));
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_current));
+ assert_ptr_equal(inactive_dummy_1,
+ native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because inactive-dummy-1 is not running. */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -268,24 +288,40 @@ clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0));
/* Passes because one of ping-clone's children is running on cluster01. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0));
/* Passes because one of ping_clone's children is running on cluster02. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
@@ -294,28 +330,30 @@ inactive_clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_current));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_clone));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-clone's children are running. */
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_current|pe_find_clone));
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_current|pe_find_clone));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
}
static void
clone_instance_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -329,70 +367,132 @@ clone_instance_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, pe_find_current));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and primitive name was given, with correct flags. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any instance's base name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_any));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_anon));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_anon));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any instance's base name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with primitive name. */
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing promotable_clone
* instead of promotable_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
renamed_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -404,9 +504,13 @@ renamed_rsc(void **state) {
assert_non_null(promotable_0);
assert_non_null(promotable_1);
- /* Passes because pe_find_renamed means the base name matches clone_name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_renamed));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_renamed));
+ // Passes because pcmk_rsc_match_history means base name matches clone_name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
}
static void
@@ -415,36 +519,32 @@ bundle_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0));
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_clone));
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_clone_only));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0));
/* Passes because one of httpd_bundle's children is running on cluster01. */
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_current_node));
}
-static void
-bundle_replica_rsc(void **state) {
- pe__bundle_variant_data_t *bundle_data = NULL;
- pe__bundle_replica_t *replica_0 = NULL;
-
- pe_resource_t *ip_0 = NULL;
- pe_resource_t *child_0 = NULL;
- pe_resource_t *container_0 = NULL;
- pe_resource_t *remote_0 = NULL;
-
- get_bundle_variant_data(bundle_data, httpd_bundle);
- replica_0 = (pe__bundle_replica_t *) bundle_data->replicas->data;
-
- ip_0 = replica_0->ip;
- child_0 = replica_0->child;
- container_0 = replica_0->container;
- remote_0 = replica_0->remote;
+static bool
+bundle_first_replica(pe__bundle_replica_t *replica, void *user_data)
+{
+ pcmk_resource_t *ip_0 = replica->ip;
+ pcmk_resource_t *child_0 = replica->child;
+ pcmk_resource_t *container_0 = replica->container;
+ pcmk_resource_t *remote_0 = replica->remote;
assert_non_null(ip_0);
assert_non_null(child_0);
@@ -457,58 +557,109 @@ bundle_replica_rsc(void **state) {
assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0));
assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster02, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster01, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster02, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, pe_find_current));
-
- /* Passes because pe_find_any matches any replica's base name. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_anon));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(container_0, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(remote_0, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+
+ // Passes because pcmk_rsc_match_basename matches any replica's base name
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_any|pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(child_0, "httpd", NULL, pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing httpd-bundle
* instead of X_0 as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, pe_find_current));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(httpd_bundle,
+ "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ return false; // Do not iterate through any further replicas
+}
+
+static void
+bundle_replica_rsc(void **state)
+{
+ pe__foreach_bundle_replica(httpd_bundle, bundle_first_replica, NULL);
}
static void
@@ -517,34 +668,50 @@ clone_group_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0));
/* Passes because one of mysql-clone-group's children is running on cluster01. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0));
/* Passes because one of mysql-clone-group's children is running on cluster02. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus the pcmk_rsc_match_clone_only check
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_instance_rsc(void **rsc) {
- pe_resource_t *mysql_group_0 = NULL;
- pe_resource_t *mysql_group_1 = NULL;
+ pcmk_resource_t *mysql_group_0 = NULL;
+ pcmk_resource_t *mysql_group_1 = NULL;
/* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
mysql_group_0 = rsc;
@@ -558,73 +725,135 @@ clone_group_instance_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and base name was given, with correct flags. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any base name. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_any));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_anon));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_anon));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any base name
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing mysql_clone_group
* instead of mysql_group_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_member_rsc(void **state) {
- pe_resource_t *mysql_proxy = NULL;
+ pcmk_resource_t *mysql_proxy = NULL;
/* Find the "mysql-proxy" resource, a member of "mysql-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
for (GList *iter2 = rsc->children; iter2 != NULL; iter2 = iter2->next) {
- pe_resource_t *child = (pe_resource_t *) iter2->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter2->data;
if (strcmp(child->id, "mysql-proxy:0") == 0) {
mysql_proxy = child;
@@ -640,24 +869,35 @@ clone_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_current_node));
/* Passes because resource's parent is a clone. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_clone));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
/* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0));
/* Passes because mysql-proxy:0 is running on cluster02. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_current_node));
}
-/* TODO: Add tests for finding on allocated node (passing a node without
- * pe_find_current, after scheduling, for a resource that is starting/stopping/moving.
+/* TODO: Add tests for finding on assigned node (passing a node without
+ * pcmk_rsc_match_current_node, after scheduling, for a resource that is
+ * starting/stopping/moving.
*/
PCMK__UNIT_TEST(setup, teardown,
cmocka_unit_test(bad_args),
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
index 67a62f8..cb3c908 100644
--- a/lib/pengine/tests/native/pe_base_name_eq_test.c
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,15 +12,15 @@
#include <crm/common/unittest_internal.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
-pe_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
+pcmk_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
+pcmk_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
static int
setup(void **state) {
@@ -36,20 +36,21 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -59,7 +60,7 @@ setup(void **state) {
httpd_bundle = rsc;
} else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "mysql-group:0") == 0) {
mysql_group_0 = child;
@@ -69,7 +70,7 @@ setup(void **state) {
}
} else if (strcmp(rsc->id, "promotable-clone") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "promotable-rsc:0") == 0) {
promotable_0 = child;
@@ -85,7 +86,7 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
diff --git a/lib/pengine/tests/status/Makefile.am b/lib/pengine/tests/status/Makefile.am
index 3f95496..c7ddb70 100644
--- a/lib/pengine/tests/status/Makefile.am
+++ b/lib/pengine/tests/status/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,10 +13,10 @@ include $(top_srcdir)/mk/unittest.mk
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pe_find_node_any_test \
- pe_find_node_id_test \
- pe_find_node_test \
- pe_new_working_set_test \
- set_working_set_defaults_test
+check_PROGRAMS = pe_find_node_any_test \
+ pe_find_node_id_test \
+ pe_find_node_test \
+ pe_new_working_set_test \
+ set_working_set_defaults_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/pe_find_node_any_test.c b/lib/pengine/tests/status/pe_find_node_any_test.c
index b911424..5f5a27e 100644
--- a/lib/pengine/tests/status/pe_find_node_any_test.c
+++ b/lib/pengine/tests/status/pe_find_node_any_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -24,8 +24,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_find_node_id_test.c b/lib/pengine/tests/status/pe_find_node_id_test.c
index 832a40a..c6b8773 100644
--- a/lib/pengine/tests/status/pe_find_node_id_test.c
+++ b/lib/pengine/tests/status/pe_find_node_id_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->id = "id1";
diff --git a/lib/pengine/tests/status/pe_find_node_test.c b/lib/pengine/tests/status/pe_find_node_test.c
index 7c7ea30..305ddc9 100644
--- a/lib/pengine/tests/status/pe_find_node_test.c
+++ b/lib/pengine/tests/status/pe_find_node_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_new_working_set_test.c b/lib/pengine/tests/status/pe_new_working_set_test.c
index cf2df4f..b385f9c 100644
--- a/lib/pengine/tests/status/pe_new_working_set_test.c
+++ b/lib/pengine/tests/status/pe_new_working_set_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -19,7 +19,7 @@ calloc_fails(void **state) {
pcmk__mock_calloc = true; // calloc() will return NULL
expect_value(__wrap_calloc, nmemb, 1);
- expect_value(__wrap_calloc, size, sizeof(pe_working_set_t));
+ expect_value(__wrap_calloc, size, sizeof(pcmk_scheduler_t));
assert_null(pe_new_working_set());
pcmk__mock_calloc = false; // Use real calloc()
@@ -27,18 +27,18 @@ calloc_fails(void **state) {
static void
calloc_succeeds(void **state) {
- pe_working_set_t *data_set = pe_new_working_set();
+ pcmk_scheduler_t *scheduler = pe_new_working_set();
/* Nothing else to test about this function, as all it does is call
* set_working_set_defaults which is also a public function and should
* get its own unit test.
*/
- assert_non_null(data_set);
+ assert_non_null(scheduler);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/status/set_working_set_defaults_test.c b/lib/pengine/tests/status/set_working_set_defaults_test.c
index c822278..7045a33 100644
--- a/lib/pengine/tests/status/set_working_set_defaults_test.c
+++ b/lib/pengine/tests/status/set_working_set_defaults_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,8 +10,9 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
#include <crm/pengine/status.h>
#include "mock_private.h"
@@ -19,27 +20,29 @@
static void
check_defaults(void **state) {
uint32_t flags;
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- flags = pe_flag_stop_rsc_orphans|pe_flag_symmetric_cluster|pe_flag_stop_action_orphans;
+ flags = pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions;
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- flags |= pe_flag_concurrent_fencing;
+ flags |= pcmk_sched_concurrent_fencing;
}
- assert_null(data_set->priv);
- assert_int_equal(data_set->order_id, 1);
- assert_int_equal(data_set->action_id, 1);
- assert_int_equal(data_set->no_quorum_policy, no_quorum_stop);
- assert_int_equal(data_set->flags, flags);
+ assert_null(scheduler->priv);
+ assert_int_equal(scheduler->order_id, 1);
+ assert_int_equal(scheduler->action_id, 1);
+ assert_int_equal(scheduler->no_quorum_policy, pcmk_no_quorum_stop);
+ assert_int_equal(scheduler->flags, flags);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/utils/Makefile.am b/lib/pengine/tests/utils/Makefile.am
index 4a3e8a2..64421e2 100644
--- a/lib/pengine/tests/utils/Makefile.am
+++ b/lib/pengine/tests/utils/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -14,8 +14,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/lib/pengine
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pe__cmp_node_name_test \
+check_PROGRAMS = pe__cmp_node_name_test \
pe__cmp_rsc_priority_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/utils/pe__cmp_node_name_test.c b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
index 45d87ee..4d602e4 100644
--- a/lib/pengine/tests/utils/pe__cmp_node_name_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,8 +15,8 @@
struct pe_node_shared_s node1_details;
struct pe_node_shared_s node2_details;
-pe_node_t node1 = {.details = &node1_details};
-pe_node_t node2 = {.details = &node2_details};
+pcmk_node_t node1 = { .details = &node1_details };
+pcmk_node_t node2 = { .details = &node2_details };
static void
nodes_equal(void **state)
diff --git a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
index 669e7a9..24c1731 100644
--- a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
@@ -14,8 +14,8 @@
#include "pe_status_private.h"
-pe_resource_t rsc1;
-pe_resource_t rsc2;
+pcmk_resource_t rsc1;
+pcmk_resource_t rsc2;
static void
rscs_equal(void **state)
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 2bd6707..3429d56 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -29,8 +29,8 @@ CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
- pe_resource_t *rsc; // Resource that history is for
- pe_node_t *node; // Node that history is for
+ pcmk_resource_t *rsc; // Resource that history is for
+ pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
@@ -49,43 +49,40 @@ struct action_history {
* use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
* flag is stringified more readably in log messages.
*/
-#define set_config_flag(data_set, option, flag) do { \
- const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
- if (scf_value != NULL) { \
- if (crm_is_true(scf_value)) { \
- (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } else { \
- (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } \
- } \
+#define set_config_flag(scheduler, option, flag) do { \
+ const char *scf_value = pe_pref((scheduler)->config_hash, (option)); \
+ if (scf_value != NULL) { \
+ if (crm_is_true(scf_value)) { \
+ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } else { \
+ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } \
+ } \
} while(0)
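
The renamed macro reads the option from scheduler->config_hash via pe_pref() and then sets or clears the given flag, passing the stringified flag name so trace logs stay readable. A representative invocation, mirroring the stonith-enabled handling later in this hunk:

    set_config_flag(scheduler, "stonith-enabled", pcmk_sched_fencing_enabled);
    if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
        crm_debug("STONITH of failed nodes is enabled");
    }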
-static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
- xmlNode **last_failure,
+static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
+ xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
-static void determine_remote_online_status(pe_working_set_t *data_set,
- pe_node_t *this_node);
-static void add_node_attrs(const xmlNode *xml_obj, pe_node_t *node,
- bool overwrite, pe_working_set_t *data_set);
+static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node);
+static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
+ bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
- pe_node_t *this_node,
- pe_working_set_t *data_set);
+ pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler);
-static void unpack_node_lrm(pe_node_t *node, const xmlNode *xml,
- pe_working_set_t *data_set);
+static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler);
-// Bitmask for warnings we only want to print once
-uint32_t pe_wo = 0;
-
static gboolean
-is_dangling_guest_node(pe_node_t *node)
+is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
@@ -94,7 +91,7 @@ is_dangling_guest_node(pe_node_t *node)
node->details->remote_rsc &&
node->details->remote_rsc->container == NULL &&
pcmk_is_set(node->details->remote_rsc->flags,
- pe_rsc_orphan_container_filler)) {
+ pcmk_rsc_removed_filler)) {
return TRUE;
}
@@ -104,23 +101,23 @@ is_dangling_guest_node(pe_node_t *node)
/*!
* \brief Schedule a fence action for a node
*
- * \param[in,out] data_set Current working set of cluster
- * \param[in,out] node Node to fence
- * \param[in] reason Text description of why fencing is needed
+ * \param[in,out] scheduler Scheduler data
+ * \param[in,out] node Node to fence
+ * \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider `priority-fencing-delay`
*/
void
-pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
+pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pe__is_guest_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
- if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
@@ -135,7 +132,8 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
@@ -145,12 +143,12 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"and guest resource no longer exists",
pe__node_name(node), reason);
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pe__is_remote_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc;
+ pcmk_resource_t *rsc = node->details->remote_rsc;
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pe__node_name(node), reason);
@@ -158,26 +156,26 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
node->details->remote_requires_reset = TRUE;
crm_warn("Remote node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply `priority-fencing-delay` for remote nodes
- pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
+ pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
crm_warn("Cluster node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
- pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
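
With the updated signature, callers pass the scheduler first, then the node, a textual reason, and whether priority-fencing-delay applies. A hedged sketch of a call site (the reason string is illustrative only):

    // Fence an unclean cluster node without considering priority-fencing-delay
    pe_fence_node(scheduler, node, "node is unclean", false);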
@@ -197,215 +195,258 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
static void
-set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
+set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
- if (!pcmk_is_set(data_set->flags, flag)) {
- result = xpath_search(data_set->input, xpath);
+ if (!pcmk_is_set(scheduler->flags, flag)) {
+ result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
- pe__set_working_set_flags(data_set, flag);
+ pe__set_working_set_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
-unpack_config(xmlNode * config, pe_working_set_t * data_set)
+unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
- data_set->config_hash = config_hash;
+ scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
- CIB_OPTIONS_FIRST, FALSE, data_set);
+ CIB_OPTIONS_FIRST, FALSE, scheduler);
- verify_pe_options(data_set->config_hash);
+ verify_pe_options(scheduler->config_hash);
- set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
- if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ set_config_flag(scheduler, "enable-startup-probes",
+ pcmk_sched_probe_resources);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
- value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
+ value = pe_pref(scheduler->config_hash, XML_ATTR_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and stonith-watchdog-timeout is nonzero");
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
- set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
+ set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
+ scheduler);
- value = pe_pref(data_set->config_hash, "stonith-timeout");
- data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
- crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
+ value = pe_pref(scheduler->config_hash, "stonith-timeout");
+ scheduler->stonith_timeout = (int) crm_parse_interval_spec(value);
+ crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
- set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
- crm_debug("STONITH of failed nodes is %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
+ set_config_flag(scheduler, "stonith-enabled", pcmk_sched_fencing_enabled);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ crm_debug("STONITH of failed nodes is enabled");
+ } else {
+ crm_debug("STONITH of failed nodes is disabled");
+ }
- data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
- if (!strcmp(data_set->stonith_action, "poweroff")) {
- pe_warn_once(pe_wo_poweroff,
+ scheduler->stonith_action = pe_pref(scheduler->config_hash,
+ "stonith-action");
+ if (!strcmp(scheduler->stonith_action, "poweroff")) {
+ pe_warn_once(pcmk__wo_poweroff,
"Support for stonith-action of 'poweroff' is deprecated "
"and will be removed in a future release (use 'off' instead)");
- data_set->stonith_action = "off";
+ scheduler->stonith_action = PCMK_ACTION_OFF;
}
- crm_trace("STONITH will %s nodes", data_set->stonith_action);
+ crm_trace("STONITH will %s nodes", scheduler->stonith_action);
- set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
- crm_debug("Concurrent fencing is %s",
- pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
+ set_config_flag(scheduler, "concurrent-fencing",
+ pcmk_sched_concurrent_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
+ crm_debug("Concurrent fencing is enabled");
+ } else {
+ crm_debug("Concurrent fencing is disabled");
+ }
- value = pe_pref(data_set->config_hash,
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
if (value) {
- data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
- crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
+ scheduler->priority_fencing_delay = crm_parse_interval_spec(value)
+ / 1000;
+ crm_trace("Priority fencing delay is %ds",
+ scheduler->priority_fencing_delay);
}
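
crm_parse_interval_spec() returns an interval in milliseconds, so the division above stores whole seconds. A minimal check of that conversion, assuming only the milliseconds convention (the variable names are illustrative):

    guint delay_ms = crm_parse_interval_spec("2min");   // 120000
    int delay_s = (int) (delay_ms / 1000);              // 120, as stored above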
- set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
+ set_config_flag(scheduler, "stop-all-resources", pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)));
- set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ set_config_flag(scheduler, "symmetric-cluster",
+ pcmk_sched_symmetric_cluster);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
- value = pe_pref(data_set->config_hash, "no-quorum-policy");
+ value = pe_pref(scheduler->config_hash, "no-quorum-policy");
if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_ignore;
+ scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_freeze;
+ scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_demote;
+ scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
- crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
+ crm_element_value_int(scheduler->input, XML_ATTR_QUORUM_PANIC,
&do_panic);
- if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- data_set->no_quorum_policy = no_quorum_suicide;
+ if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
"fencing is disabled");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
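
For reference, the strings matched above come from the no-quorum-policy cluster property. A hypothetical CIB fragment (not part of this change) that would drive this code:

    /* Hypothetical crm_config entry read via pe_pref() above:
     *
     *   <cluster_property_set id="cib-bootstrap-options">
     *     <nvpair id="opt-nqp" name="no-quorum-policy" value="demote"/>
     *   </cluster_property_set>
     *
     * pe_pref() returns "demote", which maps to pcmk_no_quorum_demote.
     */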
- set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
- crm_trace("Orphan resources are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-resources",
+ pcmk_sched_stop_removed_resources);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ crm_trace("Orphan resources are stopped");
+ } else {
+ crm_trace("Orphan resources are ignored");
+ }
- set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
- crm_trace("Orphan resource actions are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-actions",
+ pcmk_sched_cancel_removed_actions);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
+ crm_trace("Orphan resource actions are stopped");
+ } else {
+ crm_trace("Orphan resource actions are ignored");
+ }
- value = pe_pref(data_set->config_hash, "remove-after-stop");
+ value = pe_pref(scheduler->config_hash, "remove-after-stop");
if (value != NULL) {
if (crm_is_true(value)) {
- pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__set_working_set_flags(scheduler, pcmk_sched_remove_after_stop);
#ifndef PCMK__COMPAT_2_0
- pe_warn_once(pe_wo_remove_after,
+ pe_warn_once(pcmk__wo_remove_after,
"Support for the remove-after-stop cluster property is"
" deprecated and will be removed in a future release");
#endif
} else {
- pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__clear_working_set_flags(scheduler,
+ pcmk_sched_remove_after_stop);
}
}
- set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
+ set_config_flag(scheduler, "maintenance-mode", pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags,
+ pcmk_sched_in_maintenance)));
- set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
- crm_trace("Start failures are %s",
- pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
+ set_config_flag(scheduler, "start-failure-is-fatal",
+ pcmk_sched_start_failure_fatal);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
+ crm_trace("Start failures are always fatal");
+ } else {
+ crm_trace("Start failures are handled by failcount");
+ }
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ set_config_flag(scheduler, "startup-fencing",
+ pcmk_sched_startup_fencing);
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
- pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
+ pe_warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes");
}
- pe__unpack_node_health_scores(data_set);
+ pe__unpack_node_health_scores(scheduler);
- data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
- crm_trace("Placement strategy: %s", data_set->placement_strategy);
+ scheduler->placement_strategy = pe_pref(scheduler->config_hash,
+ "placement-strategy");
+ crm_trace("Placement strategy: %s", scheduler->placement_strategy);
- set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
- crm_trace("Resources will%s be locked to cleanly shut down nodes",
- (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- value = pe_pref(data_set->config_hash,
+ set_config_flag(scheduler, "shutdown-lock", pcmk_sched_shutdown_lock);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
- data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
- crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
+ scheduler->shutdown_lock = crm_parse_interval_spec(value) / 1000;
+ crm_trace("Resources will be locked to nodes that were cleanly "
+ "shut down (locks expire after %s)",
+ pcmk__readable_interval(scheduler->shutdown_lock));
+ } else {
+ crm_trace("Resources will not be locked to nodes that were cleanly "
+ "shut down");
+ }
+
+ value = pe_pref(scheduler->config_hash,
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT);
+ scheduler->node_pending_timeout = crm_parse_interval_spec(value) / 1000;
+ if (scheduler->node_pending_timeout == 0) {
+ crm_trace("Do not fence pending nodes");
+ } else {
+ crm_trace("Fence pending nodes after %s",
+ pcmk__readable_interval(scheduler->node_pending_timeout
+ * 1000));
}
return TRUE;
}
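
Several settings above (priority-fencing-delay, shutdown-lock-limit, node-pending-timeout) share the same reduction from an interval string to whole seconds. A minimal sketch, assuming crm_parse_interval_spec() returns milliseconds as elsewhere in Pacemaker:

    /* Sketch only: how a configured "2min" becomes the 120 stored in
     * scheduler->node_pending_timeout.
     */
    guint ms = crm_parse_interval_spec("2min");  /* 120000 ms */
    guint seconds = ms / 1000;                   /* 120 s */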
-pe_node_t *
+pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
- const char *score, pe_working_set_t * data_set)
+ const char *score, pcmk_scheduler_t *scheduler)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
- if (pe_find_node(data_set->nodes, uname) != NULL) {
+ if (pe_find_node(scheduler->nodes, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
return NULL;
}
@@ -425,14 +466,14 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
- new_node->details->data_set = data_set;
+ new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
- new_node->details->type = node_member;
+ new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
- new_node->details->type = node_remote;
- pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
+ new_node->details->type = pcmk_node_variant_remote;
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
@@ -443,7 +484,7 @@ pe_create_node(const char *id, const char *uname, const char *type,
"assuming 'ping'", pcmk__s(uname, "without name"),
type);
}
- pe_warn_once(pe_wo_ping_node,
+ pe_warn_once(pcmk__wo_ping_node,
"Support for nodes of type 'ping' (such as %s) is "
"deprecated and will be removed in a future release",
pcmk__s(uname, "unnamed node"));
@@ -464,13 +505,13 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
- data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node,
- pe__cmp_node_name);
+ scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
+ pe__cmp_node_name);
return new_node;
}
static const char *
-expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
+expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
@@ -527,9 +568,10 @@ expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data
}
static void
-handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
+handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
- if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
+ if ((new_node->details->type == pcmk_node_variant_remote)
+ && (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
@@ -537,7 +579,7 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
@@ -552,10 +594,10 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
}
gboolean
-unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
+unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
@@ -578,46 +620,48 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
"> entry in configuration without id");
continue;
}
- new_node = pe_create_node(id, uname, type, score, data_set);
+ new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
- handle_startup_fencing(data_set, new_node);
+ handle_startup_fencing(scheduler, new_node);
- add_node_attrs(xml_obj, new_node, FALSE, data_set);
+ add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
}
}
- if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
+ if (scheduler->localhost
+ && (pe_find_node(scheduler->nodes, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
- pe_create_node(data_set->localhost, data_set->localhost, NULL, 0,
- data_set);
+ pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0,
+ scheduler);
}
return TRUE;
}
static void
-setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
+setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
- g_list_foreach(rsc->children, (GFunc) setup_container, data_set);
+ g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
- pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
+ pcmk_resource_t *container = pe_find_resource(scheduler->resources,
+ container_id);
if (container) {
rsc->container = container;
- pe__set_resource_flags(container, pe_rsc_is_container);
+ pe__set_resource_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
} else {
@@ -627,7 +671,7 @@ setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -646,11 +690,12 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
new_node_id = ID(xml_obj);
/* The "pe_find_node" check is here to make sure we don't iterate over
* an expanded node that has already been added to the node list. */
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
@@ -663,12 +708,14 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* configuration for the guest node's connection, to be unpacked
* later.
*/
- new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
+ scheduler);
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
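
expand_remote_rsc_meta() keys off the guest-node meta-attribute of the resource. A hypothetical primitive that would take the branch above:

    /* Hypothetical input:
     *
     *   <primitive id="vm1" ...>
     *     <meta_attributes id="vm1-meta">
     *       <nvpair id="vm1-rn" name="remote-node" value="guest1"/>
     *     </meta_attributes>
     *   </primitive>
     *
     * expand_remote_rsc_meta() returns "guest1", and pe_create_node() adds a
     * remote node by that name.
     */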
@@ -681,13 +728,15 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
xml_obj2 = pcmk__xe_next(xml_obj2)) {
- new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
+ new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
+ scheduler);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, ID(xml_obj2), ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
}
}
@@ -704,20 +753,20 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* easy access to the connection resource during the scheduler calculations.
*/
static void
-link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
+link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
- pe_node_t *remote_node = NULL;
+ pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
- remote_node = pe_find_node(data_set->nodes, new_rsc->id);
+ remote_node = pe_find_node(scheduler->nodes, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pe_rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
@@ -728,7 +777,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
- handle_startup_fencing(data_set, remote_node);
+ handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
@@ -742,7 +791,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
static void
destroy_tag(gpointer data)
{
- pe_tag_t *tag = data;
+ pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
@@ -756,7 +805,7 @@ destroy_tag(gpointer data)
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
- * \param[in,out] data_set Where to put resource information
+ * \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
@@ -764,63 +813,64 @@ destroy_tag(gpointer data)
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
-unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
+unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
- data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
+ scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
const char *id = ID(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
- crm_element_name(xml_obj));
+ xml_obj->name);
continue;
}
if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE,
pcmk__str_none)) {
- if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
/* Record the template's ID for the knowledge of its existence anyway. */
- g_hash_table_insert(data_set->template_rsc_sets, strdup(id), NULL);
+ g_hash_table_insert(scheduler->template_rsc_sets, strdup(id),
+ NULL);
}
continue;
}
crm_trace("Unpacking <%s " XML_ATTR_ID "='%s'>",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
- data_set) == pcmk_rc_ok) {
- data_set->resources = g_list_append(data_set->resources, new_rsc);
+ scheduler) == pcmk_rc_ok) {
+ scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
}
}
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
- setup_container(rsc, data_set);
- link_rsc2remotenode(data_set, rsc);
+ setup_container(rsc, scheduler);
+ link_rsc2remotenode(scheduler, rsc);
}
- data_set->resources = g_list_sort(data_set->resources,
+ scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
- } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
- && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
@@ -831,11 +881,11 @@ unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
}
gboolean
-unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
- data_set->tags = pcmk__strkey_table(free, destroy_tag);
+ scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
xml_tag = pcmk__xe_next(xml_tag)) {
@@ -849,7 +899,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
- crm_element_name(xml_tag));
+ (const char *) xml_tag->name);
continue;
}
@@ -864,11 +914,11 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
- crm_element_name(xml_obj_ref), tag_id);
+ xml_obj_ref->name, tag_id);
continue;
}
- if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
+ if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
@@ -880,7 +930,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
-unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
+unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
@@ -888,7 +938,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
ticket_id = ID(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
@@ -897,9 +947,9 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
crm_trace("Processing ticket state for %s", ticket_id);
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = ticket_new(ticket_id, data_set);
+ ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
@@ -907,7 +957,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_ticket, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
continue;
@@ -948,7 +998,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
}
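
The entries unpacked here live at /cib/status/tickets/ticket_state, as noted above. A hypothetical entry carrying the properties this function reads:

    /* Hypothetical input for unpack_ticket_state():
     *
     *   <tickets>
     *     <ticket_state id="ticketA" granted="true"
     *                   last-granted="1689947123" standby="false"/>
     *   </tickets>
     */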
static gboolean
-unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
+unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -958,19 +1008,19 @@ unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
continue;
}
- unpack_ticket_state(xml_obj, data_set);
+ unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
-unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
- pe_working_set_t *data_set)
+unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
+ pcmk_scheduler_t *scheduler)
{
const char *resource_discovery_enabled = NULL;
const xmlNode *attrs = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
return;
@@ -990,7 +1040,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
this_node->details->unseen = FALSE;
}
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
- add_node_attrs(attrs, this_node, TRUE, data_set);
+ add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pe__node_name(this_node));
@@ -1003,7 +1053,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
}
if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
- ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
+ ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pe__node_name(this_node));
this_node->details->maintenance = TRUE;
}
@@ -1011,7 +1061,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
if (pe__is_remote_node(this_node)
- && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
@@ -1033,19 +1083,19 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
* \internal
* \brief Unpack a cluster node's transient attributes
*
- * \param[in] state CIB node state XML
- * \param[in,out] node Cluster node whose attributes are being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] node Cluster node whose attributes are being unpacked
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
- pe_working_set_t *data_set)
+unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS,
FALSE);
- add_node_attrs(attrs, node, TRUE, data_set);
+ add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
crm_info("%s is in standby mode", pe__node_name(node));
@@ -1074,15 +1124,15 @@ unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
* resource history inside it. Multiple passes through the status are needed to
* fully unpack everything.
*
- * \param[in] state CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
+unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
id = crm_element_value(state, XML_ATTR_ID);
if (id == NULL) {
@@ -1093,15 +1143,21 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
uname = crm_element_value(state, XML_ATTR_UNAME);
if (uname == NULL) {
- crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
- XML_ATTR_UNAME);
- return;
+ /* If a joining peer lets the cluster gain quorum from corosync before it
+ * has joined the CPG membership of pacemaker-controld, it's possible that
+ * the created node_state entry doesn't have a uname yet. Recognize the
+ * node as pending and wait for it to join CPG.
+ */
+ crm_trace("Handling " XML_CIB_TAG_STATE " entry with id=\"%s\" without "
+ XML_ATTR_UNAME, id);
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
- pcmk__config_warn("Ignoring recorded node state for '%s' because "
- "it is no longer in the configuration", uname);
+ pcmk__config_warn("Ignoring recorded node state for id=\"%s\" (%s) "
+ "because it is no longer in the configuration",
+ id, pcmk__s(uname, "uname unknown"));
return;
}
@@ -1116,7 +1172,7 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
return;
}
- unpack_transient_attributes(state, this_node, data_set);
+ unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
@@ -1126,16 +1182,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
crm_trace("Determining online status of cluster node %s (id %s)",
pe__node_name(this_node), id);
- determine_online_status(state, this_node, data_set);
+ determine_online_status(state, this_node, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
- && (data_set->no_quorum_policy == no_quorum_suicide)) {
+ && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
- pe_fence_node(data_set, this_node, "cluster does not have quorum",
+ pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
@@ -1150,16 +1206,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
* in another node's history, so it might take multiple passes to unpack
* everything.
*
- * \param[in] status CIB XML status section
- * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
- * \param[in,out] data_set Cluster working set
+ * \param[in] status CIB XML status section
+ * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
@@ -1169,7 +1225,7 @@ unpack_node_history(const xmlNode *status, bool fence,
const char *id = ID(state);
const char *uname = crm_element_value(state, XML_ATTR_UNAME);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
@@ -1178,7 +1234,7 @@ unpack_node_history(const xmlNode *status, bool fence,
continue;
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
@@ -1200,10 +1256,10 @@ unpack_node_history(const xmlNode *status, bool fence,
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
- if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
- || (rsc->container->role != RSC_ROLE_STARTED)) {
+ if ((rsc == NULL) || (rsc->role != pcmk_role_started)
+ || (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
@@ -1216,11 +1272,11 @@ unpack_node_history(const xmlNode *status, bool fence,
* connection is up, with the exception of when shutdown locks are
* in use.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
- || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
- && (rsc->role != RSC_ROLE_STARTED))) {
+ || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
+ && (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
@@ -1231,8 +1287,9 @@ unpack_node_history(const xmlNode *status, bool fence,
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
- } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
- |pe_flag_shutdown_lock)
+ } else if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_fencing_enabled
+ |pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
@@ -1240,15 +1297,15 @@ unpack_node_history(const xmlNode *status, bool fence,
}
if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
- unpack_handle_remote_attrs(this_node, state, data_set);
+ determine_remote_online_status(scheduler, this_node);
+ unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
- unpack_node_lrm(this_node, state, data_set);
+ unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
@@ -1259,172 +1316,324 @@ unpack_node_history(const xmlNode *status, bool fence,
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
-unpack_status(xmlNode * status, pe_working_set_t * data_set)
+unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
- unpack_tickets_state((xmlNode *) state, data_set);
+ unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
- unpack_node_state(state, data_set);
+ unpack_node_state(state, scheduler);
}
}
- while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
+ while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_fencing_enabled),
+ scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
- if (data_set->stop_needed != NULL) {
- for (GList *item = data_set->stop_needed; item; item = item->next) {
- pe_resource_t *container = item->data;
- pe_node_t *node = pe__current_node(container);
+ if (scheduler->stop_needed != NULL) {
+ for (GList *item = scheduler->stop_needed; item; item = item->next) {
+ pcmk_resource_t *container = item->data;
+ pcmk_node_t *node = pe__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
- g_list_free(data_set->stop_needed);
- data_set->stop_needed = NULL;
+ g_list_free(scheduler->stop_needed);
+ scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *this_node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *this_node = gIter->data;
if (!pe__is_guest_or_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
- pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
+ pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
+/*!
+ * \internal
+ * \brief Unpack node's time when it became a member at the cluster layer
+ *
+ * \param[in] node_state Node's node_state entry
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Epoch time when node became a cluster member
+ * (or scheduler effective time for legacy entries) if a member,
+ * 0 if not a member, or -1 if no valid information available
+ */
+static long long
+unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
+{
+ const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
+ int member = 0;
+
+ if (member_time == NULL) {
+ return -1LL;
+
+ } else if (crm_str_to_boolean(member_time, &member) == 1) {
+ /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
+ * recorded as a boolean for a DC < 2.1.7, or the node is pending
+ * shutdown and has left the CPG, in which case it was set to 1 to avoid
+ * fencing for node-pending-timeout.
+ *
+ * We return the effective time for in_ccm=1 because what matters for
+ * avoiding fencing is that the effective time minus this value stays
+ * below the pending node timeout.
+ */
+ return member? (long long) get_effective_time(scheduler) : 0LL;
+
+ } else {
+ long long when_member = 0LL;
+
+ if ((pcmk__scan_ll(member_time, &when_member,
+ 0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
+ " in " XML_CIB_TAG_STATE " entry", member_time);
+ return -1LL;
+ }
+ return when_member;
+ }
+}
+
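
Concretely, the branches above map PCMK__XA_IN_CCM values as follows (timestamps are illustrative):

    /* Illustrative results for unpack_node_member():
     *   attribute absent     -> -1                     (no valid information)
     *   in_ccm="0"           ->  0                     (not a member)
     *   in_ccm="1"           ->  get_effective_time()  (legacy boolean entry)
     *   in_ccm="1689947123"  ->  1689947123            (member since then)
     */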
+/*!
+ * \internal
+ * \brief Unpack node's time when it became online in process group
+ *
+ * \param[in] node_state Node's node_state entry
+ *
+ * \return Epoch time when node became online in process group (or 0 if not
+ * online, or 1 for legacy online entries)
+ */
+static long long
+unpack_node_online(const xmlNode *node_state)
+{
+ const char *peer_time = crm_element_value(node_state, PCMK__XA_CRMD);
+
+ // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
+ if (pcmk__str_eq(peer_time, OFFLINESTATUS,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ return 0LL;
+
+ } else if (pcmk__str_eq(peer_time, ONLINESTATUS, pcmk__str_casei)) {
+ return 1LL;
+
+ } else {
+ long long when_online = 0LL;
+
+ if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
+ || (when_online < 0)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_CRMD " in "
+ XML_CIB_TAG_STATE " entry, assuming offline", peer_time);
+ return 0LL;
+ }
+ return when_online;
+ }
+}
+
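
And the corresponding mapping for PCMK__XA_CRMD (timestamps illustrative):

    /* Illustrative results for unpack_node_online():
     *   attribute absent or "offline" -> 0           (offline)
     *   crmd="online"                 -> 1           (legacy online entry)
     *   crmd="1689947125"             -> 1689947125  (online since then)
     *   unrecognized value            -> 0           (warned, assumed offline)
     */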
+/*!
+ * \internal
+ * \brief Unpack node attribute for user-requested fencing
+ *
+ * \param[in] node Node to check
+ * \param[in] node_state Node's node_state entry in CIB status
+ *
+ * \return \c true if fencing has been requested for \p node, otherwise \c false
+ */
+static bool
+unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
+{
+ long long value = 0LL;
+ int value_i = 0;
+ const char *value_s = pe_node_attribute_raw(node, PCMK_NODE_ATTR_TERMINATE);
+
+ // Value may be boolean or an epoch time
+ if (crm_str_to_boolean(value_s, &value_i) == 1) {
+ return (value_i != 0);
+ }
+ if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
+ return (value > 0);
+ }
+ crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
+ "node attribute for %s", value_s, pe__node_name(node));
+ return false;
+}
+
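
Because the terminate attribute may be recorded either as a boolean or as an epoch time, the rule above reduces to (values illustrative):

    /* Illustrative results for unpack_node_terminate():
     *   terminate="true"         -> true   (boolean request)
     *   terminate="1689947123"   -> true   (positive epoch time)
     *   terminate="false" / "0"  -> false
     *   anything unrecognized    -> false  (warned and ignored)
     */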
static gboolean
-determine_online_status_no_fencing(pe_working_set_t *data_set,
+determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
- pe_node_t *this_node)
+ pcmk_node_t *this_node)
{
gboolean online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
- if (!crm_is_true(in_cluster)) {
- crm_trace("Node is down: in_cluster=%s",
- pcmk__s(in_cluster, "<null>"));
+ if (when_member <= 0) {
+ crm_trace("Node %s is %sdown", pe__node_name(this_node),
+ ((when_member < 0)? "presumed " : ""));
- } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
+ } else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
- crm_debug("Node is not ready to run resources: %s", join);
+ crm_debug("Node %s is not ready to run resources: %s",
+ pe__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
- crm_trace("Controller is down: "
- "in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ crm_trace("Node %s controller is down: "
+ "member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
- pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
- crm_info("in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
+ crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
-static gboolean
-determine_online_status_fencing(pe_working_set_t *data_set,
- const xmlNode *node_state, pe_node_t *this_node)
+/*!
+ * \internal
+ * \brief Check whether a node has taken too long to join controller group
+ *
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node to check
+ * \param[in] when_member Epoch time when node became a cluster member
+ * \param[in] when_online Epoch time when node joined controller group
+ *
+ * \return true if node has been pending (on the way up) longer than
+ * node-pending-timeout, otherwise false
+ * \note This will also update the cluster's recheck time if appropriate.
+ */
+static inline bool
+pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ long long when_member, long long when_online)
{
- gboolean online = FALSE;
- gboolean do_terminate = FALSE;
- bool crmd_online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
- const char *terminate = pe_node_attribute_raw(this_node, "terminate");
-
-/*
- - XML_NODE_IN_CLUSTER ::= true|false
- - XML_NODE_IS_PEER ::= online|offline
- - XML_NODE_JOIN_STATE ::= member|down|pending|banned
- - XML_NODE_EXPECTED ::= member|down
-*/
+ if ((scheduler->node_pending_timeout > 0)
+ && (when_member > 0) && (when_online <= 0)) {
+ // There is a timeout on pending nodes, and node is pending
- if (crm_is_true(terminate)) {
- do_terminate = TRUE;
+ time_t timeout = when_member + scheduler->node_pending_timeout;
- } else if (terminate != NULL && strlen(terminate) > 0) {
- /* could be a time() value */
- char t = terminate[0];
-
- if (t != '0' && isdigit(t)) {
- do_terminate = TRUE;
+ if (get_effective_time(node->details->data_set) >= timeout) {
+ return true; // Node has timed out
}
+
+ // Node is pending, but still has time
+ pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
+ return false;
+}
+
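
A worked example of the timeout arithmetic, with hypothetical numbers:

    /* Hypothetical: node-pending-timeout=120s, when_member=1000000 and
     * when_online=0 (node still pending), so timeout = 1000000 + 120.
     * If the effective time has reached 1000120, the node has timed out and
     * the caller fences it; otherwise the cluster recheck time is set so the
     * node is re-evaluated at 1000120.
     */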
+static bool
+determine_online_status_fencing(pcmk_scheduler_t *scheduler,
+ const xmlNode *node_state,
+ pcmk_node_t *this_node)
+{
+ bool termination_requested = unpack_node_terminate(this_node, node_state);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
+
+/*
+ - PCMK__XA_JOIN ::= member|down|pending|banned
+ - PCMK__XA_EXPECTED ::= member|down
- crm_trace("%s: in_cluster=%s is_peer=%s join=%s expected=%s term=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate);
+ @COMPAT with entries recorded for DCs < 2.1.7
+ - PCMK__XA_IN_CCM ::= true|false
+ - PCMK__XA_CRMD ::= online|offline
- online = crm_is_true(in_cluster);
- crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
- if (exp_state == NULL) {
- exp_state = CRMD_JOINSTATE_DOWN;
- }
+ Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
+ - PCMK__XA_IN_CCM ::= <timestamp>|0
+ Since when the node has been a cluster member. A value of 0 means the node
+ is not a cluster member.
+
+ - PCMK__XA_CRMD ::= <timestamp>|0
+ Since when the peer has been online in CPG. A value of 0 means the peer is
+ offline in CPG.
+*/
+
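
Putting the post-2.1.7 formats together, a node_state entry might look like this (values hypothetical):

    /* Hypothetical node_state entry recorded by a 2.1.7+ DC:
     *
     *   <node_state id="1" uname="node1" in_ccm="1689947123"
     *               crmd="1689947125" join="member" expected="member"/>
     *
     * i.e. the node became a cluster member at 1689947123 and joined the
     * controller process group two seconds later.
     */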
+ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
+ pe__node_name(this_node), when_member, when_online,
+ pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
+ (termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pe__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
- online = crmd_online;
+ return (when_online > 0);
+ }
- } else if (in_cluster == NULL) {
- pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
+ if (when_member < 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer has not been seen by the cluster", FALSE);
+ return false;
+ }
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
- pe_fence_node(data_set, this_node,
+ if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
+ pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
- } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
+ } else if (termination_requested) {
+ if ((when_member <= 0) && (when_online <= 0)
+ && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
+ crm_info("%s was fenced as requested", pe__node_name(this_node));
+ return false;
+ }
+ pe_fence_node(scheduler, this_node, "fencing was requested", false);
+
+ } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
+ pcmk__str_null_matches)) {
- if (crm_is_true(in_cluster) || crmd_online) {
+ if (pending_too_long(scheduler, this_node, when_member, when_online)) {
+ pe_fence_node(scheduler, this_node,
+ "peer pending timed out on joining the process group",
+ FALSE);
+
+ } else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pe__node_name(this_node));
this_node->details->standby = TRUE;
@@ -1435,48 +1644,41 @@ determine_online_status_fencing(pe_working_set_t *data_set,
pe__node_name(this_node));
}
- } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
- && crm_is_true(in_cluster) == FALSE && !crmd_online) {
- crm_info("%s was just shot", pe__node_name(this_node));
- online = FALSE;
-
- } else if (crm_is_true(in_cluster) == FALSE) {
+ } else if (when_member <= 0) {
// Consider `priority-fencing-delay` for lost nodes
- pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
+ pe_fence_node(scheduler, this_node,
+ "peer is no longer part of the cluster", TRUE);
- } else if (!crmd_online) {
- pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
+ } else if (when_online <= 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
- } else if (do_terminate) {
- pe_fence_node(data_set, this_node, "termination was requested", FALSE);
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pe__node_name(this_node));
- } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
+ } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
+ CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources", pe__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
- pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
- crm_warn("%s: in-cluster=%s is-peer=%s join=%s expected=%s term=%d shutdown=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate,
- this_node->details->shutdown);
+ pe_fence_node(scheduler, this_node, "peer was in an unknown state",
+ FALSE);
}
- return online;
+ return (when_member > 0);
}
static void
-determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
+determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node)
{
- pe_resource_t *rsc = this_node->details->remote_rsc;
- pe_resource_t *container = NULL;
- pe_node_t *host = NULL;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *container = NULL;
+ pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
@@ -1494,33 +1696,36 @@ determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_nod
}
/* If the resource is currently started, mark it online. */
- if (rsc->role == RSC_ROLE_STARTED) {
+ if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
- if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
+ if ((rsc->role == pcmk_role_started)
+ && (rsc->next_role == pcmk_role_stopped)) {
+
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
- if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
+ if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
- } else if (rsc->role == RSC_ROLE_STOPPED
- || (container && container->role == RSC_ROLE_STOPPED)) {
+ } else if ((rsc->role == pcmk_role_stopped)
+ || ((container != NULL)
+ && (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
@@ -1541,11 +1746,11 @@ remote_online_done:
}
static void
-determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
- pe_working_set_t *data_set)
+determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
@@ -1566,11 +1771,13 @@ determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
* Anyone caught abusing this logic will be shot
*/
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- online = determine_online_status_no_fencing(data_set, node_state, this_node);
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ online = determine_online_status_no_fencing(scheduler, node_state,
+ this_node);
} else {
- online = determine_online_status_fencing(data_set, node_state, this_node);
+ online = determine_online_status_fencing(scheduler, node_state,
+ this_node);
}
if (online) {
@@ -1692,30 +1899,30 @@ clone_zero(const char *last_rsc_id)
return zero;
}
-static pe_resource_t *
+static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
copy_in_properties(xml_rsc, rsc_entry);
crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
- if (pe__unpack_resource(xml_rsc, &rsc, NULL, data_set) != pcmk_rc_ok) {
+ if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
- pe_node_t *node;
+ pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
- node = pe_find_node(data_set->nodes, rsc_id);
+ node = pe_find_node(scheduler->nodes, rsc_id);
if (node == NULL) {
- node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
+ node = pe_create_node(rsc_id, rsc_id, "remote", NULL, scheduler);
}
- link_rsc2remotenode(data_set, rsc);
+ link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
@@ -1726,10 +1933,10 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
- pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed_filler);
}
- pe__set_resource_flags(rsc, pe_rsc_orphan);
- data_set->resources = g_list_append(data_set->resources, rsc);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed);
+ scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
@@ -1737,21 +1944,22 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
- * \param[in,out] parent Clone resource that orphan will be added to
- * \param[in] rsc_id Orphan's resource ID
- * \param[in] node Where orphan is active (for logging only)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] parent Clone resource that orphan will be added to
+ * \param[in] rsc_id Orphan's resource ID
+ * \param[in] node Where orphan is active (for logging only)
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
-static pe_resource_t *
-create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
- const pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_resource_t *
+create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *top = pe__create_clone_child(parent, data_set);
+ pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
// find_rsc() because we might be a cloned group
- pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
+ pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pe__node_name(node));
@@ -1767,30 +1975,30 @@ create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
* (2) an inactive instance (i.e. within the total of clone-max instances);
* (3) a newly created orphan (i.e. clone-max instances are already active).
*
- * \param[in,out] data_set Cluster information
- * \param[in] node Node on which to check for instance
- * \param[in,out] parent Clone to check
- * \param[in] rsc_id Name of cloned resource in history (without instance)
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node on which to check for instance
+ * \param[in,out] parent Clone to check
+ * \param[in] rsc_id Name of cloned resource in history (no instance)
*/
-static pe_resource_t *
-find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
- pe_resource_t *parent, const char *rsc_id)
+static pcmk_resource_t *
+find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
- pe_resource_t *rsc = NULL;
- pe_resource_t *inactive_instance = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(parent != NULL);
CRM_ASSERT(pe_rsc_is_clone(parent));
- CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
+ CRM_ASSERT(!pcmk_is_set(parent->flags, pcmk_rsc_unique));
// Check for active (or partially active, for cloned groups) instance
pe_rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pe__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
- pe_resource_t *child = rIter->data;
+ pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
@@ -1804,8 +2012,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if globally-unique
* was flipped from true to false); and
- * (3) when we re-run calculations on the same data set as part of a
- * simulation.
+ * (3) when we re-run calculations on the same scheduler data as part of
+ * a simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
@@ -1815,7 +2023,7 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*/
CRM_LOG_ASSERT(locations->next == NULL);
- if (((pe_node_t *)locations->data)->details == node->details) {
+ if (((pcmk_node_t *) locations->data)->details == node->details) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
@@ -1823,7 +2031,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*
* If the history entry is orphaned, rsc will be NULL.
*/
- rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
+ rsc = parent->fns->find_rsc(child, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
@@ -1848,10 +2057,10 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
} else {
pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
- && !pcmk_is_set(child->flags, pe_rsc_block)) {
+ && !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
- pe_find_clone);
+ pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
@@ -1881,30 +2090,30 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pe__is_guest_node(node)
- && !pe__is_universal_clone(parent, data_set)) {
+ && !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
- rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
+ rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
-static pe_resource_t *
-unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
+static pcmk_resource_t *
+unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
- pe_resource_t *rsc = NULL;
- pe_resource_t *parent = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
- rsc = pe_find_resource(data_set->resources, rsc_id);
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
@@ -1912,9 +2121,10 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
* a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
- pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
+ pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
+ clone0_id);
- if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
+ if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
@@ -1924,7 +2134,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
}
free(clone0_id);
- } else if (rsc->variant > pe_native) {
+ } else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
@@ -1940,7 +2150,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
} else {
char *base = clone_strip(rsc_id);
- rsc = find_anonymous_clone(data_set, node, parent, base);
+ rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
@@ -1952,42 +2162,43 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
pcmk__str_update(&rsc->clone_name, rsc_id);
pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pe__node_name(node), rsc->id,
- (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
+ (pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : ""));
}
return rsc;
}
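
unpack_find_resource() above leans on two small ID helpers: clone_zero() maps "rsc" to "rsc:0", and clone_strip() removes a ":N" instance suffix. Simplified re-implementations follow, with behavior inferred from their use here rather than copied from libcrmcommon:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* "rsc" -> "rsc:0" (caller frees) */
    static char *clone_zero_sketch(const char *rsc_id) {
        size_t len = strlen(rsc_id) + 3;
        char *result = malloc(len);

        snprintf(result, len, "%s:0", rsc_id);
        return result;
    }

    /* "rsc:3" -> "rsc" (caller frees) */
    static char *clone_strip_sketch(const char *rsc_id) {
        const char *colon = strrchr(rsc_id, ':');
        size_t base_len = (colon != NULL)? (size_t) (colon - rsc_id)
                                         : strlen(rsc_id);
        char *result = malloc(base_len + 1);

        memcpy(result, rsc_id, base_len);
        result[base_len] = '\0';
        return result;
    }

    int main(void) {
        char *zero = clone_zero_sketch("myclone");
        char *base = clone_strip_sketch("myclone:2");

        printf("%s %s\n", zero, base); // myclone:0 myclone
        free(zero);
        free(base);
        return 0;
    }
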
-static pe_resource_t *
-process_orphan_resource(const xmlNode *rsc_entry, const pe_node_t *node,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node));
- rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
+ rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
- if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
- resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
+ resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__",
+ scheduler);
}
return rsc;
}
static void
-process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
+process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
- pe_node_t *tmpnode = NULL;
+ pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
- enum action_fail_response save_on_fail = action_fail_ignore;
+ enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
@@ -1995,12 +2206,12 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
fail2text(on_fail));
/* process current state */
- if (rsc->role != RSC_ROLE_UNKNOWN) {
- pe_resource_t *iter = rsc;
+ if (rsc->role != pcmk_role_unknown) {
+ pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
- pe_node_t *n = pe__copy_node(node);
+ pcmk_node_t *n = pe__copy_node(node);
pe_rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
@@ -2009,7 +2220,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
pe__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
- if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
iter = iter->parent;
@@ -2017,10 +2228,10 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* If a managed resource is believed to be running, but node is down ... */
- if (rsc->role > RSC_ROLE_STOPPED
+ if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
- && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
@@ -2032,12 +2243,15 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* resource to run again once we are sure we know its state.
*/
if (pe__is_guest_node(node)) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
if (pe__is_remote_node(node) && node->details->remote_rsc
- && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
+ && !pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
@@ -2070,20 +2284,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
- on_fail = action_fail_ignore;
+ on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
/* nothing to do */
break;
- case action_fail_demote:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
@@ -2092,20 +2306,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
free(reason);
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
@@ -2113,19 +2327,22 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->cluster);
break;
- case action_fail_stop:
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
+ case pcmk_on_fail_stop:
+ pe__set_next_role(rsc, pcmk_role_stopped, "on-fail=stop");
break;
- case action_fail_recover:
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart:
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_restart_container:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart_container:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (rsc->container && pe_rsc_is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
@@ -2136,14 +2353,16 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
g_list_prepend(rsc->cluster->stop_needed, rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
- } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ } else if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_reset_remote:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ case pcmk_on_fail_reset_remote:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
@@ -2161,14 +2380,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* require the stop action regardless if fencing is occurring or not. */
- if (rsc->role > RSC_ROLE_STOPPED) {
+ if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
+ pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
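
The pe__set_resource_flags()/pe__clear_resource_flags()/pcmk_is_set() calls throughout process_rsc_state() are plain bit-flag operations. A self-contained model of the idiom; the demo_* values are illustrative, not the real pcmk_rsc_* bit assignments:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum demo_rsc_flags {
        demo_rsc_managed        = (1 << 0),
        demo_rsc_failed         = (1 << 1),
        demo_rsc_stop_if_failed = (1 << 2),
    };

    static bool is_set(uint64_t flags, uint64_t bits) {
        return (flags & bits) == bits; // all requested bits present
    }

    int main(void) {
        uint64_t flags = demo_rsc_managed;

        // like pe__set_resource_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed)
        flags |= (demo_rsc_failed|demo_rsc_stop_if_failed);

        // like pe__clear_resource_flags(rsc, pcmk_rsc_managed)
        flags &= ~((uint64_t) demo_rsc_managed);

        printf("failed=%d managed=%d\n",
               is_set(flags, demo_rsc_failed), is_set(flags, demo_rsc_managed));
        return 0;
    }
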
@@ -2177,16 +2396,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* to be fenced. By setting unseen = FALSE, the remote-node failure will
* result in a fencing operation regardless if we're going to attempt to
* reconnect to the remote-node in this transition or not. */
- if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__config_warn("Detected active orphan %s running on %s",
rsc->id, pe__node_name(node));
} else {
@@ -2198,16 +2418,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
native_add_running(rsc, node, rsc->cluster,
- (save_on_fail != action_fail_ignore));
+ (save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
break;
- case action_fail_demote:
- case action_fail_block:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_block:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
break;
default:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
@@ -2220,14 +2441,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->clone_name = NULL;
} else {
- GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
- FALSE);
+ GList *possible_matches = pe__resource_actions(rsc, node,
+ PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *stop = (pe_action_t *) gIter->data;
+ pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
- pe__set_action_flags(stop, pe_action_optional);
+ pe__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
@@ -2236,21 +2457,21 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
- if (rsc->role == RSC_ROLE_STOPPED
+ if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
-process_recurring(pe_node_t * node, pe_resource_t * rsc,
+process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
- GList *sorted_op_list, pe_working_set_t * data_set)
+ GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
@@ -2303,7 +2524,7 @@ process_recurring(pe_node_t * node, pe_resource_t * rsc,
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node));
- custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
+ custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
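
The key built by pcmk__op_key() above is the usual "<rsc>_<task>_<interval-ms>" operation-history key (e.g. myrsc_monitor_10000). A trivial equivalent, for illustration only:

    #include <stdio.h>

    /* Sketch of the operation key format; the real helper lives in
     * libcrmcommon. */
    static void op_key_sketch(char *buf, size_t len, const char *rsc,
                              const char *task, unsigned int interval_ms) {
        snprintf(buf, len, "%s_%s_%u", rsc, task, interval_ms);
    }

    int main(void) {
        char key[64];

        op_key_sketch(key, sizeof(key), "myrsc", "monitor", 10000);
        printf("%s\n", key); // myrsc_monitor_10000
        return 0;
    }
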
@@ -2328,20 +2549,24 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
- } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ } else if ((implied_monitor_start <= *stop_index)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
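
calculate_active_ops() boils a call-id-sorted history down to two indices: the newest successful stop and the newest start (or migrate_from); the resource counts as active when the start outlives the stop. A condensed model that skips the implied-monitor and promote/demote refinements shown above:

    #include <stdio.h>
    #include <string.h>

    struct op { const char *task; const char *rc; };

    static void active_indices(const struct op *ops, int n,
                               int *start_index, int *stop_index) {
        *start_index = -1;
        *stop_index = -1;
        for (int i = 0; i < n; i++) {
            if ((strcmp(ops[i].task, "stop") == 0)
                && (strcmp(ops[i].rc, "0") == 0)) {
                *stop_index = i;       // newest successful stop

            } else if ((strcmp(ops[i].task, "start") == 0)
                       || (strcmp(ops[i].task, "migrate_from") == 0)) {
                *start_index = i;      // newest (attempted) start
            }
        }
    }

    int main(void) {
        struct op history[] = {
            { "start", "0" }, { "monitor", "0" }, { "stop", "0" }, { "start", "0" },
        };
        int start = -1, stop = -1;

        active_indices(history, 4, &start, &stop);
        printf("start=%d stop=%d\n", start, stop); // start=3 stop=2 -> active
        return 0;
    }
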
@@ -2357,26 +2582,26 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
// If resource history entry has shutdown lock, remember lock node and time
static void
-unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
- if ((data_set->shutdown_lock > 0)
- && (get_effective_time(data_set)
- > (lock_time + data_set->shutdown_lock))) {
+ if ((scheduler->shutdown_lock > 0)
+ && (get_effective_time(scheduler)
+ > (lock_time + scheduler->shutdown_lock))) {
pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pe__node_name(node));
- pe__clear_resource_history(rsc, node, data_set);
+ pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
- rsc->lock_node = (pe_node_t *) node;
+ rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
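
The expiry test in unpack_shutdown_lock() is simple clock arithmetic, isolated below. This sketch assumes scheduler->shutdown_lock holds the configured shutdown-lock limit in seconds, with 0 meaning locks never expire:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* A lock taken at lock_time (the node's shutdown time) expires once
     * the effective time passes lock_time + shutdown_lock. */
    static bool shutdown_lock_expired(time_t now, time_t lock_time,
                                      time_t shutdown_lock) {
        return (shutdown_lock > 0) && (now > lock_time + shutdown_lock);
    }

    int main(void) {
        time_t locked_at = 1000;

        printf("%d\n", shutdown_lock_expired(1601, locked_at, 600)); // 1
        printf("%d\n", shutdown_lock_expired(1500, locked_at, 600)); // 0
        return 0;
    }
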
@@ -2388,30 +2613,30 @@ unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] rsc_entry lrm_resource XML being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
-static pe_resource_t *
-unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
+ pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
- enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = ID(lrm_resource);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
- enum action_fail_response on_fail = action_fail_ignore;
- enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
+ enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
@@ -2428,7 +2653,7 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
op_list = g_list_prepend(op_list, rsc_op);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
@@ -2436,25 +2661,25 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
}
/* find the resource */
- rsc = unpack_find_resource(data_set, node, rsc_id);
+ rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
- rsc = process_orphan_resource(lrm_resource, node, data_set);
+ rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
- rsc->role = RSC_ROLE_UNKNOWN;
+ rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
@@ -2465,7 +2690,8 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
- process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
+ process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
+ scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
@@ -2473,7 +2699,9 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
- if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
+ if ((rsc->next_role == pcmk_role_unknown)
+ || (req_role < rsc->next_role)) {
+
pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
@@ -2492,13 +2720,13 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
- pe_resource_t *rsc;
- pe_resource_t *container;
+ pcmk_resource_t *rsc;
+ pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
@@ -2512,15 +2740,14 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
continue;
}
- container = pe_find_resource(data_set->resources, container_id);
+ container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
- rsc = pe_find_resource(data_set->resources, rsc_id);
- if (rsc == NULL ||
- !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
- rsc->container != NULL) {
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
+ if ((rsc == NULL) || (rsc->container != NULL)
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
@@ -2535,12 +2762,13 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
* \internal
* \brief Unpack one node's lrm status section
*
- * \param[in,out] node Node whose status is being unpacked
- * \param[in] xml CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] node Node whose status is being unpacked
+ * \param[in] xml CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
+unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
@@ -2558,10 +2786,10 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
for (const xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
- pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
+ pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
- && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
@@ -2570,26 +2798,26 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
- handle_orphaned_container_fillers(xml, data_set);
+ handle_orphaned_container_fillers(xml, scheduler);
}
}
static void
-set_active(pe_resource_t * rsc)
+set_active(pcmk_resource_t *rsc)
{
- const pe_resource_t *top = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
- if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
- rsc->role = RSC_ROLE_UNPROMOTED;
+ if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
+ rsc->role = pcmk_role_unpromoted;
} else {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
- pe_node_t *node = value;
+ pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
@@ -2604,7 +2832,7 @@ set_node_score(gpointer key, gpointer value, gpointer user_data)
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
- int target_rc, pe_working_set_t *data_set)
+ int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2620,12 +2848,13 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
NULL);
/* Need to check against transition_magic too? */
- if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATE) == 0)) {
+ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']",
NULL);
- } else if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATED) == 0)) {
+ } else if ((source != NULL)
+ && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']",
NULL);
@@ -2633,7 +2862,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
g_string_append_c(xpath, ']');
}
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
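
For reference, the XPath that find_lrm_op() assembles is roughly of the following shape; the exact prefix comes from the XPATH_*/SUB_XPATH_* macros that this hunk elides, so treat the string as illustrative only:

    #include <stdio.h>

    /* Assumed overall shape of the status-section query; not the literal
     * macro expansion. */
    int main(void) {
        char xpath[256];

        snprintf(xpath, sizeof(xpath),
                 "//node_state[@uname='%s']//lrm_resource[@id='%s']"
                 "/lrm_rsc_op[@operation='%s']",
                 "node1", "myrsc", "monitor");
        printf("%s\n", xpath);
        return 0;
    }
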
@@ -2652,7 +2881,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2665,7 +2894,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']",
NULL);
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
@@ -2682,7 +2911,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
* \return true if \p rsc_id is unknown on \p node_name, otherwise false
*/
static bool
-unknown_on_node(pe_resource_t *rsc, const char *node_name)
+unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
@@ -2708,20 +2937,20 @@ unknown_on_node(pe_resource_t *rsc, const char *node_name)
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
- xmlNode *monitor = find_lrm_op(rsc_id, CRMD_ACTION_STATUS, node_name,
- NULL, PCMK_OCF_NOT_RUNNING, data_set);
+ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
+ NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
@@ -2730,22 +2959,22 @@ monitor_not_running_after(const char *rsc_id, const char *node_name,
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] xml_op Event that non-monitor is being compared to
- * \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] xml_op Event that non-monitor is being compared to
+ * \param[in] same_node Whether the operations are on the same node
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
- lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
+ lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
@@ -2760,8 +2989,9 @@ non_monitor_after(const char *rsc_id, const char *node_name,
task = crm_element_value(op, XML_LRM_ATTR_TASK);
- if (pcmk__str_any_of(task, CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
@@ -2774,11 +3004,11 @@ non_monitor_after(const char *rsc_id, const char *node_name,
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] migrate_to Any migrate_to event that is being compared to
- * \param[in] migrate_from Any migrate_from event that is being compared to
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] migrate_to Any migrate_to event that is being compared to
+ * \param[in] migrate_from Any migrate_from event that is being compared to
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, false otherwise
*/
@@ -2786,7 +3016,7 @@ static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
@@ -2826,9 +3056,9 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
- return non_monitor_after(rsc_id, node_name, xml_op, same_node, data_set)
+ return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
- data_set);
+ scheduler);
}
/*!
@@ -2844,8 +3074,8 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* \return Standard Pacemaker return code
*/
static int
-get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
- const pe_node_t *target_node,
+get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
+ const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE);
@@ -2891,11 +3121,11 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
* \param[in] node Migration source
*/
static void
-add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
+add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pe_rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pe__node_name(node));
- rsc->role = RSC_ROLE_STOPPED;
+ rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
@@ -2942,7 +3172,7 @@ unpack_migrate_to_success(struct action_history *history)
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
- pe_node_t *target_node = NULL;
+ pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
@@ -2961,8 +3191,8 @@ unpack_migrate_to_success(struct action_history *history)
true, history->rsc->cluster);
// Check for a migrate_from action from this source on the target
- migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED, target,
- source, -1, history->rsc->cluster);
+ migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
+ target, source, -1, history->rsc->cluster);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
@@ -2998,7 +3228,7 @@ unpack_migrate_to_success(struct action_history *history)
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
target_node = pe_find_node(history->rsc->cluster->nodes, target);
active_on_target = !target_newer_state && (target_node != NULL)
@@ -3010,8 +3240,9 @@ unpack_migrate_to_success(struct action_history *history)
TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
@@ -3028,8 +3259,8 @@ unpack_migrate_to_success(struct action_history *history)
}
if (active_on_target) {
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
@@ -3046,8 +3277,9 @@ unpack_migrate_to_success(struct action_history *history)
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
}
@@ -3073,12 +3305,12 @@ unpack_migrate_to_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
- target_migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED,
- target, source, PCMK_OCF_OK,
- history->rsc->cluster);
+ target_migrate_from = find_lrm_op(history->rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, target, source,
+ PCMK_OCF_OK, history->rsc->cluster);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
@@ -3096,8 +3328,8 @@ unpack_migrate_to_failure(struct action_history *history)
* active there.
* (if it is up).
*/
- pe_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
- target);
+ pcmk_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
+ target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
@@ -3140,10 +3372,10 @@ unpack_migrate_from_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
- source_migrate_to = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATE,
+ source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK,
history->rsc->cluster);
@@ -3162,8 +3394,8 @@ unpack_migrate_from_failure(struct action_history *history)
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, history->rsc->cluster,
@@ -3250,38 +3482,38 @@ static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
- case action_fail_demote:
+ case pcmk_on_fail_demote:
switch (second) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
return 1;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return 1;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
@@ -3292,26 +3524,26 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
break;
}
switch (second) {
- case action_fail_demote:
- return (first == action_fail_ignore)? -1 : 1;
+ case pcmk_on_fail_demote:
+ return (first == pcmk_on_fail_ignore)? -1 : 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
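
cmp_on_fail() expresses a severity order among the specially handled policies: ignore < demote < restart < reset-remote < restart-container, with the remaining policies (handled in the elided tail of the function) treated as most severe. An equivalent rank-table sketch under that reading:

    #include <stdio.h>

    enum demo_on_fail {
        demo_ignore, demo_demote, demo_restart,
        demo_reset_remote, demo_restart_container, demo_other,
    };

    static int severity(enum demo_on_fail f) {
        switch (f) {
            case demo_ignore:            return 0;
            case demo_demote:            return 1;
            case demo_restart:           return 2;
            case demo_reset_remote:      return 3;
            case demo_restart_container: return 4;
            default:                     return 5; // stop, fence, standby, ...
        }
    }

    /* <0, 0, or >0, like the real comparator */
    static int cmp_on_fail_sketch(enum demo_on_fail a, enum demo_on_fail b) {
        return (severity(a) > severity(b)) - (severity(a) < severity(b));
    }

    int main(void) {
        printf("%d\n", cmp_on_fail_sketch(demo_reset_remote, demo_restart)); // 1
        return 0;
    }
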
@@ -3331,13 +3563,13 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
* \param[in,out] rsc Resource to ban
*/
static void
-ban_from_all_nodes(pe_resource_t *rsc)
+ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -INFINITY;
- pe_resource_t *fail_rsc = rsc;
+ pcmk_resource_t *fail_rsc = rsc;
if (fail_rsc->parent != NULL) {
- pe_resource_t *parent = uber_parent(fail_rsc);
+ pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pe_rsc_is_anon_clone(parent)) {
/* For anonymous clones, if an operation with on-fail=stop fails for
@@ -3358,18 +3590,50 @@ ban_from_all_nodes(pe_resource_t *rsc)
/*!
* \internal
+ * \brief Get configured failure handling and role after failure for an action
+ *
+ * \param[in,out] history Unpacked action history entry
+ * \param[out] on_fail Where to set configured failure handling
+ * \param[out] fail_role Where to set to role after failure
+ */
+static void
+unpack_failure_handling(struct action_history *history,
+ enum action_fail_response *on_fail,
+ enum rsc_role_e *fail_role)
+{
+ xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
+ history->interval_ms, true);
+
+ GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
+ history->task,
+ history->interval_ms, config);
+
+ const char *on_fail_str = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+
+ *on_fail = pcmk__parse_on_fail(history->rsc, history->task,
+ history->interval_ms, on_fail_str);
+ *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
+ meta);
+ g_hash_table_destroy(meta);
+}
+
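
The new helper resolves failure handling and the post-failure role from the action's configuration metadata rather than from a throwaway action object (see the removal of get_action_on_fail() further down). For flavor, an illustrative string-to-policy mapping in the spirit of pcmk__parse_on_fail(); this is not the real implementation, which also validates the value against the action and cluster configuration:

    #include <stdio.h>
    #include <string.h>

    enum demo_on_fail {
        demo_on_fail_ignore, demo_on_fail_block, demo_on_fail_demote,
        demo_on_fail_stop, demo_on_fail_restart, demo_on_fail_standby,
        demo_on_fail_fence, demo_on_fail_restart_container,
    };

    static enum demo_on_fail
    parse_on_fail_sketch(const char *value)
    {
        if (value == NULL) {
            return demo_on_fail_restart; // assume restart as the common default
        }
        if (strcmp(value, "ignore") == 0)  return demo_on_fail_ignore;
        if (strcmp(value, "block") == 0)   return demo_on_fail_block;
        if (strcmp(value, "demote") == 0)  return demo_on_fail_demote;
        if (strcmp(value, "stop") == 0)    return demo_on_fail_stop;
        if (strcmp(value, "standby") == 0) return demo_on_fail_standby;
        if (strcmp(value, "fence") == 0)   return demo_on_fail_fence;
        if (strcmp(value, "restart-container") == 0) {
            return demo_on_fail_restart_container;
        }
        return demo_on_fail_restart;
    }

    int main(void)
    {
        printf("%d\n", parse_on_fail_sketch("fence") == demo_on_fail_fence); // 1
        return 0;
    }
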
+/*!
+ * \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
- * \param[in,out] history Parsed action result history
- * \param[out] last_failure Set this to action XML
- * \param[in,out] on_fail What should be done about the result
+ * \param[in,out] history Parsed action result history
+ * \param[in] config_on_fail Action failure handling from configuration
+ * \param[in] fail_role Resource's role after failure of this action
+ * \param[out] last_failure This will be set to the history XML
+ * \param[in,out] on_fail Actual handling of action result
*/
static void
-unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
+unpack_rsc_op_failure(struct action_history *history,
+ enum action_fail_response config_on_fail,
+ enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
- pe_action_t *action = NULL;
char *last_change_s = NULL;
*last_failure = history->xml;
@@ -3377,7 +3641,7 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
- if (!pcmk_is_set(history->rsc->cluster->flags, pe_flag_symmetric_cluster)
+ if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
@@ -3414,36 +3678,34 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
free(last_change_s);
- action = custom_action(history->rsc, strdup(history->key), history->task,
- NULL, TRUE, FALSE, history->rsc->cluster);
- if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
- pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s (%s)",
- fail2text(*on_fail), fail2text(action->on_fail),
- action->uuid, history->key);
- *on_fail = action->on_fail;
+ if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
+ pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s",
+ fail2text(*on_fail), fail2text(config_on_fail),
+ history->key);
+ *on_fail = config_on_fail;
}
- if (strcmp(history->task, CRMD_ACTION_STOP) == 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -INFINITY,
"__stop_fail__", history->rsc->cluster);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATE) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATED) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if (strcmp(history->task, CRMD_ACTION_DEMOTE) == 0) {
- if (action->on_fail == action_fail_block) {
- history->rsc->role = RSC_ROLE_PROMOTED;
- pe__set_next_role(history->rsc, RSC_ROLE_STOPPED,
+ } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
+ if (config_on_fail == pcmk_on_fail_block) {
+ history->rsc->role = pcmk_role_promoted;
+ pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with on-fail=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
@@ -3451,16 +3713,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pe_rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
@@ -3469,18 +3731,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
"Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
history->rsc->id, role2text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
- fail2text(action->on_fail), role2text(action->fail_role));
+ fail2text(config_on_fail), role2text(fail_role));
- if ((action->fail_role != RSC_ROLE_STARTED)
- && (history->rsc->next_role < action->fail_role)) {
- pe__set_next_role(history->rsc, action->fail_role, "failure");
+ if ((fail_role != pcmk_role_started)
+ && (history->rsc->next_role < fail_role)) {
+ pe__set_next_role(history->rsc, fail_role, "failure");
}
- if (action->fail_role == RSC_ROLE_STOPPED) {
+ if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
-
- pe_free_action(action);
}
/*!
@@ -3497,7 +3757,7 @@ block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
- if (strcmp(history->task, CRMD_ACTION_STOP) != 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
@@ -3516,8 +3776,8 @@ block_if_unrecoverable(struct action_history *history)
free(last_change_s);
- pe__clear_resource_flags(history->rsc, pe_rsc_managed);
- pe__set_resource_flags(history->rsc, pe_rsc_block);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
@@ -3556,8 +3816,8 @@ remap_because(struct action_history *history, const char **why, int value,
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
- * the operation will be recorded in the data set's list of failed operations
- * to highlight it for the user.
+ * the operation will be recorded in the scheduler data's list of failed
+ * operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
@@ -3664,16 +3924,16 @@ remap_operation(struct action_history *history,
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
- || !pcmk_is_set(history->rsc->flags, pe_rsc_managed)) {
+ || !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
- history->rsc->role = RSC_ROLE_STOPPED;
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ history->rsc->role = pcmk_role_stopped;
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
@@ -3692,13 +3952,13 @@ remap_operation(struct action_history *history,
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
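
The PCMK_OCF_NOT_RUNNING branch above encodes one rule: "not running" counts as a failure only when it was unexpected, on a managed resource, and not a probe. Isolated as a sketch (7 is the OCF "not running" exit status):

    #include <stdbool.h>
    #include <stdio.h>

    static bool not_running_is_failure(bool is_probe, int expected_rc,
                                       int actual_rc, bool managed) {
        return !is_probe && (expected_rc != actual_rc) && managed;
    }

    int main(void) {
        // A probe that finds the resource stopped is a clean result:
        printf("%d\n", not_running_is_failure(true, 0, 7, true));  // 0
        // An unexpected "not running" on a managed resource is a failure:
        printf("%d\n", not_running_is_failure(false, 0, 7, true)); // 1
        return 0;
    }
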
@@ -3765,16 +4025,15 @@ remap_done:
// return TRUE if start or monitor last failure but parameters changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
- pe_resource_t *rsc, pe_node_t *node)
+ pcmk_resource_t *rsc, pcmk_node_t *node)
{
- if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
-
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
- pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
+ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->cluster);
} else {
@@ -3783,13 +4042,13 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->cluster);
switch (digest_data->rc) {
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pe__xe_history_key(xml_op),
node->details->id);
break;
- case RSC_DIGEST_MATCH:
+ case pcmk__digest_match:
break;
default:
return TRUE;
@@ -3801,21 +4060,21 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
// Order action after fencing of remote node, given connection rsc
static void
-order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
- pe_working_set_t *data_set)
+order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
+ pcmk_scheduler_t *scheduler)
{
- pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
+ pcmk_node_t *remote_node = pe_find_node(scheduler->nodes, remote_conn->id);
if (remote_node) {
- pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
- FALSE, data_set);
+ pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
+ FALSE, scheduler);
- order_actions(fence, action, pe_order_implies_then);
+ order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
static bool
-should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
+should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
@@ -3839,10 +4098,11 @@ should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
- && pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
- && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ && pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
+ && (interval_ms != 0)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
- pe_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
+ pcmk_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
@@ -3909,7 +4169,8 @@ check_operation_expiry(struct action_history *history)
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
- &last_failure, pe_fc_effective,
+ &last_failure,
+ pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
@@ -3920,13 +4181,14 @@ check_operation_expiry(struct action_history *history)
history->rsc->failure_timeout, (long long) last_failure);
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
- pe__update_recheck_time(last_failure, history->rsc->cluster);
+ pe__update_recheck_time(last_failure, history->rsc->cluster,
+ "fail count expiration");
}
}
if (expired) {
- if (pe_get_failcount(history->node, history->rsc, NULL, pe_fc_default,
- history->xml)) {
+ if (pe_get_failcount(history->node, history->rsc, NULL,
+ pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
@@ -3963,12 +4225,14 @@ check_operation_expiry(struct action_history *history)
}
if (clear_reason != NULL) {
+ pcmk_action_t *clear_op = NULL;
+
// Schedule clearing of the fail count
- pe_action_t *clear_op = pe__clear_failcount(history->rsc, history->node,
- clear_reason,
- history->rsc->cluster);
+ clear_op = pe__clear_failcount(history->rsc, history->node,
+ clear_reason, history->rsc->cluster);
- if (pcmk_is_set(history->rsc->cluster->flags, pe_flag_stonith_enabled)
+ if (pcmk_is_set(history->rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
@@ -3987,7 +4251,7 @@ check_operation_expiry(struct action_history *history)
}
if (expired && (history->interval_ms == 0)
- && pcmk__str_eq(history->task, CRMD_ACTION_STATUS, pcmk__str_none)) {
+ && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
@@ -4022,27 +4286,6 @@ pe__target_rc_from_xml(const xmlNode *xml_op)
/*!
* \internal
- * \brief Get the failure handling for an action
- *
- * \param[in,out] history Parsed action history entry
- *
- * \return Failure handling appropriate to action
- */
-static enum action_fail_response
-get_action_on_fail(struct action_history *history)
-{
- enum action_fail_response result = action_fail_recover;
- pe_action_t *action = custom_action(history->rsc, strdup(history->key),
- history->task, NULL, TRUE, FALSE,
- history->rsc->cluster);
-
- result = action->on_fail;
- pe_free_action(action);
- return result;
-}
-
-/*!
- * \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
@@ -4060,53 +4303,53 @@ update_resource_state(struct action_history *history, int exit_status,
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pe_rsc_is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STATUS,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pe__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
- if (history->rsc->role < RSC_ROLE_STARTED) {
+ if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_START, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STOP, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_PROMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_DEMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
- if (*on_fail == action_fail_demote) {
+ if (*on_fail == pcmk_on_fail_demote) {
// Demote clears an error only if on-fail=demote
clear_past_failure = true;
}
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATED,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pe__node_name(history->node));
set_active(history->rsc);
@@ -4117,26 +4360,26 @@ update_resource_state(struct action_history *history, int exit_status,
}
switch (*on_fail) {
- case action_fail_stop:
- case action_fail_fence:
- case action_fail_migrate:
- case action_fail_standby:
+ case pcmk_on_fail_stop:
+ case pcmk_on_fail_ban:
+ case pcmk_on_fail_standby_node:
+ case pcmk_on_fail_fence_node:
pe_rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, fail2text(*on_fail), history->task);
break;
- case action_fail_block:
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_restart_container:
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ case pcmk_on_fail_block:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_restart_container:
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
@@ -4144,8 +4387,8 @@ update_resource_state(struct action_history *history, int exit_status,
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
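
The switch above decides whether a completed action may clear an earlier failure: policies that escalate to node-level recovery (stop, ban, standby, fence) survive a successful action, while resource-level policies (ignore, block, demote, restart, restart-container) reset back to ignore. A self-contained sketch of that split, assuming a simplified enum in place of enum action_fail_response:

    #include <stdbool.h>
    #include <stdio.h>

    enum fail_policy {
        FAIL_IGNORE, FAIL_DEMOTE, FAIL_RESTART, FAIL_RESTART_CONTAINER,
        FAIL_BLOCK, FAIL_STOP, FAIL_BAN, FAIL_STANDBY, FAIL_FENCE
    };

    /* Node-level policies are not cleared by a later successful action */
    static bool survives_success(enum fail_policy p)
    {
        switch (p) {
            case FAIL_STOP:
            case FAIL_BAN:
            case FAIL_STANDBY:
            case FAIL_FENCE:
                return true;
            default:
                return false;
        }
    }

    int main(void)
    {
        enum fail_policy on_fail = FAIL_RESTART;

        if (!survives_success(on_fail)) {
            on_fail = FAIL_IGNORE;  /* clear past failures */
        }
        printf("on_fail is now %d\n", on_fail);
        return 0;
    }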
@@ -4170,14 +4413,14 @@ can_affect_state(struct action_history *history)
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
- return pcmk__str_any_of(history->task, CRMD_ACTION_STATUS,
- CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
+ return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
- return !pcmk__str_any_of(history->task, CRMD_ACTION_NOTIFY,
- CRMD_ACTION_METADATA, NULL);
+ return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_META_DATA, NULL);
#endif
}
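
Either branch of can_affect_state() is just a set-membership test over task names; the new PCMK_ACTION_* constants only rename the members. A standalone sketch of the same test with plain string literals standing in for the constants (the real code uses pcmk__str_any_of()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* true if task is one of the operations that can change whether a
     * resource is considered active and/or failed
     */
    static bool can_affect_state(const char *task)
    {
        static const char *interesting[] = {
            "monitor", "start", "stop", "promote", "demote",
            "migrate_to", "migrate_from", "asyncmon",
        };

        for (size_t i = 0;
             i < sizeof(interesting) / sizeof(interesting[0]); i++) {
            if (strcmp(task, interesting[i]) == 0) {
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        printf("monitor: %d, notify: %d\n",
               can_affect_state("monitor"), can_affect_state("notify"));
        return 0;
    }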
@@ -4244,8 +4487,8 @@ process_expired_result(struct action_history *history, int orig_exit_status)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
- if (history->rsc->role <= RSC_ROLE_STOPPED) {
- history->rsc->role = RSC_ROLE_UNKNOWN;
+ if (history->rsc->role <= pcmk_role_stopped) {
+ history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
@@ -4303,9 +4546,9 @@ mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
- pe_resource_t *ban_rsc = history->rsc;
+ pcmk_resource_t *ban_rsc = history->rsc;
- if (!pcmk_is_set(history->rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
@@ -4392,20 +4635,20 @@ process_pending_action(struct action_history *history,
return;
}
- if (strcmp(history->task, CRMD_ACTION_START) == 0) {
- pe__set_resource_flags(history->rsc, pe_rsc_start_pending);
+ if (strcmp(history->task, PCMK_ACTION_START) == 0) {
+ pe__set_resource_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if ((strcmp(history->task, CRMD_ACTION_MIGRATE) == 0)
+ } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
        /* A migrate_to action is pending on an unclean source, so force a stop
HOLD
* on the target.
*/
const char *migrate_target = NULL;
- pe_node_t *target = NULL;
+ pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
XML_LRM_ATTR_MIGRATE_TARGET);
@@ -4439,13 +4682,14 @@ process_pending_action(struct action_history *history,
}
static void
-unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
- pe_resource_t *parent = rsc;
- enum action_fail_response failure_strategy = action_fail_recover;
+ pcmk_resource_t *parent = rsc;
+ enum rsc_role_e fail_role = pcmk_role_unknown;
+ enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
@@ -4514,7 +4758,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
@@ -4529,25 +4773,29 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
case PCMK_EXEC_NOT_INSTALLED:
- failure_strategy = get_action_on_fail(&history);
- if (failure_strategy == action_fail_ignore) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pe__node_name(node),
history.execution_status, history.exit_status,
history.id);
- /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */
- *on_fail = action_fail_migrate;
+ /* Also for printing it as "FAILED" by marking it as
+ * pcmk_rsc_failed later
+ */
+ *on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -INFINITY, "hard-error",
rsc->cluster);
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pe__is_guest_or_remote_node(node)
- && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
@@ -4555,7 +4803,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
* that, ensure the remote connection is considered failed.
*/
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
@@ -4571,10 +4819,10 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
break;
}
- failure_strategy = get_action_on_fail(&history);
- if ((failure_strategy == action_fail_ignore)
- || (failure_strategy == action_fail_restart_container
- && (strcmp(history.task, CRMD_ACTION_STOP) == 0))) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if ((failure_strategy == pcmk_on_fail_ignore)
+ || ((failure_strategy == pcmk_on_fail_restart_container)
+ && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
@@ -4589,17 +4837,18 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
- pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
+ pe__set_resource_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
- if ((failure_strategy == action_fail_restart_container)
- && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
+ if ((failure_strategy == pcmk_on_fail_restart_container)
+ && cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
@@ -4635,15 +4884,15 @@ done:
}
static void
-add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
- pe_working_set_t *data_set)
+add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
+ pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -4654,8 +4903,8 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
strdup(node->details->id));
- if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
- data_set->dc_node = node;
+ if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
+ scheduler->dc_node = node;
node->details->is_dc = TRUE;
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
@@ -4664,18 +4913,19 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
}
- cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
+ cluster_name = g_hash_table_lookup(scheduler->config_hash, "cluster-name");
if (cluster_name) {
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
strdup(cluster_name));
}
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
- node->details->attrs, NULL, overwrite, data_set);
+ node->details->attrs, NULL, overwrite,
+ scheduler);
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
node->details->utilization, NULL,
- FALSE, data_set);
+ FALSE, scheduler);
if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
const char *site_name = pe_node_attribute_raw(node, "site-name");
@@ -4760,15 +5010,15 @@ extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gbool
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
- pe_working_set_t * data_set)
+ pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
- xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
+ xmlNode *status = find_xml_node(scheduler->input, XML_CIB_TAG_STATUS, TRUE);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
@@ -4782,20 +5032,20 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
continue;
}
- this_node = pe_find_node(data_set->nodes, uname);
+ this_node = pe_find_node(scheduler->nodes, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
} else {
- determine_online_status(node_state, this_node, data_set);
+ determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
- || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
/* offline nodes run no resources...
* unless stonith is enabled in which case we need to
* make sure rsc start events happen after the stonith
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index ef0a092..4055d6d 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -27,40 +27,40 @@ gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
* \internal
* \brief Check whether we can fence a particular node
*
- * \param[in] data_set Working set for cluster
- * \param[in] node Name of node to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
-pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
+pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
- pe_node_t *container_node = n->data;
+ pcmk_node_t *container_node = n->data;
if (!container_node->details->online
- && !pe_can_fence(data_set, container_node)) {
+ && !pe_can_fence(scheduler, container_node)) {
return false;
}
}
return true;
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
return false; /* Turned off */
- } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
return false; /* No devices */
- } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
return true;
- } else if (data_set->no_quorum_policy == no_quorum_ignore) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
return true;
} else if(node == NULL) {
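
pe_can_fence() is a short decision ladder: a guest node is fenceable if every host of its container is online or itself fenceable; otherwise fencing requires the feature to be enabled, at least one device, and either quorum or no-quorum-policy=ignore. A simplified, self-contained version of the cluster-wide part of that ladder (stand-in struct, not the Pacemaker types):

    #include <stdbool.h>
    #include <stdio.h>

    struct cluster {
        bool fencing_enabled;
        bool have_fencing_device;
        bool quorate;
        bool no_quorum_ignore;
    };

    static bool can_fence(const struct cluster *c)
    {
        if (!c->fencing_enabled) {
            return false;               /* turned off */
        }
        if (!c->have_fencing_device) {
            return false;               /* no devices */
        }
        if (c->quorate) {
            return true;
        }
        if (c->no_quorum_ignore) {
            return true;
        }
        return false;                   /* no quorum and policy forbids it */
    }

    int main(void)
    {
        struct cluster c = { true, true, false, true };

        printf("can fence: %s\n", can_fence(&c) ? "yes" : "no");
        return 0;
    }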
@@ -85,65 +85,25 @@ pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
-pe_node_t *
-pe__copy_node(const pe_node_t *this_node)
+pcmk_node_t *
+pe__copy_node(const pcmk_node_t *this_node)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused
+ new_node->count = this_node->count;
new_node->details = this_node->details;
return new_node;
}
-/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
-void
-node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores)
-{
- GHashTable *result = hash;
- pe_node_t *other_node = NULL;
- GList *gIter = list;
-
- GHashTableIter iter;
- pe_node_t *node = NULL;
-
- g_hash_table_iter_init(&iter, hash);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
-
- other_node = pe_find_node_id(list, node->details->id);
- if (other_node == NULL) {
- node->weight = -INFINITY;
- crm_trace("Banning dependent from %s (no primary instance)",
- pe__node_name(node));
- } else if (merge_scores) {
- node->weight = pcmk__add_scores(node->weight, other_node->weight);
- crm_trace("Added primary's score %s to dependent's score for %s "
- "(now %s)", pcmk_readable_score(other_node->weight),
- pe__node_name(node), pcmk_readable_score(node->weight));
- }
- }
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- other_node = pe_hash_table_lookup(result, node->details->id);
-
- if (other_node == NULL) {
- pe_node_t *new_node = pe__copy_node(node);
-
- new_node->weight = -INFINITY;
- g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
- }
- }
-}
-
/*!
* \internal
* \brief Create a node hash table from a node list
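
pe__copy_node() is deliberately a shallow copy: the per-placement fields (weight, discovery mode, and now count) are duplicated, while details still points at the shared node record. A minimal demonstration of that sharing, with a stand-in struct:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct node_details { char name[16]; };

    struct node {
        int weight;
        int count;
        struct node_details *details;   /* shared, never duplicated */
    };

    static struct node *copy_node(const struct node *src)
    {
        struct node *dst = calloc(1, sizeof(*dst));

        assert(dst != NULL);
        dst->weight = src->weight;
        dst->count = src->count;
        dst->details = src->details;    /* shallow: same details object */
        return dst;
    }

    int main(void)
    {
        struct node_details d;
        struct node a = { 100, 1, &d };
        struct node *b = NULL;

        strcpy(d.name, "node1");
        b = copy_node(&a);
        b->weight = -1;                 /* independent per-copy field */
        printf("a.weight=%d b->weight=%d same details: %d\n",
               a.weight, b->weight, a.details == b->details);
        free(b);
        return 0;
    }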
@@ -159,8 +119,9 @@ pe__node_list2table(const GList *list)
result = pcmk__strkey_table(NULL, free);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- pe_node_t *new_node = pe__copy_node((const pe_node_t *) gIter->data);
+ pcmk_node_t *new_node = NULL;
+ new_node = pe__copy_node((const pcmk_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
@@ -184,8 +145,8 @@ pe__node_list2table(const GList *list)
gint
pe__cmp_node_name(gconstpointer a, gconstpointer b)
{
- const pe_node_t *node1 = (const pe_node_t *) a;
- const pe_node_t *node2 = (const pe_node_t *) b;
+ const pcmk_node_t *node1 = (const pcmk_node_t *) a;
+ const pcmk_node_t *node2 = (const pcmk_node_t *) b;
if ((node1 == NULL) && (node2 == NULL)) {
return 0;
@@ -207,23 +168,23 @@ pe__cmp_node_name(gconstpointer a, gconstpointer b)
* \internal
* \brief Output node weights to stdout
*
- * \param[in] rsc Use allowed nodes for this resource
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes If rsc is not specified, use these nodes
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc Use allowed nodes for this resource
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes If rsc is not specified, use these nodes
+ * \param[in,out] scheduler Scheduler data
*/
static void
-pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
- GHashTable *nodes, pe_working_set_t *data_set)
+pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment,
+ GHashTable *nodes, pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes),
pe__cmp_node_name);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- const pe_node_t *node = (const pe_node_t *) gIter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) gIter->data;
out->message(out, "node-weight", rsc, comment, node->details->uname,
pcmk_readable_score(node->weight));
@@ -244,11 +205,11 @@ pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
- const pe_resource_t *rsc, const char *comment,
+ const pcmk_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
// Don't waste time if we're not tracing at this point
pcmk__if_tracing({}, return);
@@ -275,23 +236,23 @@ pe__log_node_weights(const char *file, const char *function, int line,
* \internal
* \brief Log or output node weights
*
- * \param[in] file Caller's filename
- * \param[in] function Caller's function name
- * \param[in] line Caller's line number
- * \param[in] to_log Log if true, otherwise output
- * \param[in] rsc If not NULL, use this resource's ID in logs,
- * and show scores recursively for any children
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes Nodes whose scores should be shown
- * \param[in,out] data_set Cluster working set
+ * \param[in] file Caller's filename
+ * \param[in] function Caller's function name
+ * \param[in] line Caller's line number
+ * \param[in] to_log Log if true, otherwise output
+ * \param[in] rsc If not NULL, use this resource's ID in logs,
+ * and show scores recursively for any children
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose scores should be shown
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__show_node_weights_as(const char *file, const char *function, int line,
- bool to_log, const pe_resource_t *rsc,
- const char *comment, GHashTable *nodes,
- pe_working_set_t *data_set)
+pe__show_node_scores_as(const char *file, const char *function, int line,
+ bool to_log, const pcmk_resource_t *rsc,
+ const char *comment, GHashTable *nodes,
+ pcmk_scheduler_t *scheduler)
{
- if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
// Don't show allocation scores for orphans
return;
}
@@ -303,16 +264,16 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
- pe__output_node_weights(rsc, comment, nodes, data_set);
+ pe__output_node_weights(rsc, comment, nodes, scheduler);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
- pe__show_node_weights_as(file, function, line, to_log, child,
- comment, child->allowed_nodes, data_set);
+ pe__show_node_scores_as(file, function, line, to_log, child,
+ comment, child->allowed_nodes, scheduler);
}
}
}
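
pe__show_node_scores_as() skips removed ("orphan") resources, shows the given table, then repeats recursively for each child using that child's allowed_nodes. The recursion is the usual GList walk; a small sketch of the pattern (GLib only, hypothetical stand-in types, build with pkg-config glib-2.0):

    #include <glib.h>
    #include <stdio.h>

    struct rsc {
        const char *id;
        GList *children;    /* of struct rsc * */
    };

    /* Show a resource, then repeat recursively for each child */
    static void show_scores(const struct rsc *rsc)
    {
        printf("scores for %s\n", rsc->id);
        for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
            show_scores((const struct rsc *) iter->data);
        }
    }

    int main(void)
    {
        struct rsc child1 = { "clone:0", NULL };
        struct rsc child2 = { "clone:1", NULL };
        struct rsc parent = { "clone", NULL };

        parent.children = g_list_prepend(parent.children, &child2);
        parent.children = g_list_prepend(parent.children, &child1);
        show_scores(&parent);
        g_list_free(parent.children);
        return 0;
    }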
@@ -334,8 +295,8 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
gint
pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *resource1 = (const pe_resource_t *)a;
- const pe_resource_t *resource2 = (const pe_resource_t *)b;
+ const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
+ const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
@@ -359,12 +320,13 @@ pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
}
static void
-resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
+resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
- if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
+ if ((rsc->exclusive_discover
+ || (node->rsc_discover_mode == pcmk_probe_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
        /* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
@@ -376,13 +338,13 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
- match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
+ match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
@@ -395,24 +357,24 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
}
void
-resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
- const char *tag, pe_working_set_t *data_set)
+resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
+ const char *tag, pcmk_scheduler_t *scheduler)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
- } else if (data_set != NULL) {
- GList *gIter = data_set->nodes;
+ } else if (scheduler != NULL) {
+ GList *gIter = scheduler->nodes;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node_iter = (pe_node_t *) gIter->data;
+ pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
- pe_node_t *node_iter = NULL;
+ pcmk_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
@@ -431,14 +393,14 @@ resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
}
time_t
-get_effective_time(pe_working_set_t * data_set)
+get_effective_time(pcmk_scheduler_t *scheduler)
{
- if(data_set) {
- if (data_set->now == NULL) {
+ if(scheduler) {
+ if (scheduler->now == NULL) {
crm_trace("Recording a new 'now'");
- data_set->now = crm_time_new(NULL);
+ scheduler->now = crm_time_new(NULL);
}
- return crm_time_get_seconds_since_epoch(data_set->now);
+ return crm_time_get_seconds_since_epoch(scheduler->now);
}
crm_trace("Defaulting to 'now'");
@@ -446,9 +408,9 @@ get_effective_time(pe_working_set_t * data_set)
}
gboolean
-get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
+get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role)
{
- enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e local_role = pcmk_role_unknown;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
@@ -459,15 +421,15 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
local_role = text2role(value);
- if (local_role == RSC_ROLE_UNKNOWN) {
+ if (local_role == pcmk_role_unknown) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
- } else if (local_role > RSC_ROLE_STARTED) {
+ } else if (local_role > pcmk_role_started) {
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
- if (local_role > RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)) {
+ if (local_role > pcmk_role_unpromoted) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
@@ -485,13 +447,14 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
gboolean
-order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
+order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
+ uint32_t flags)
{
GList *gIter = NULL;
- pe_action_wrapper_t *wrapper = NULL;
+ pcmk__related_action_t *wrapper = NULL;
GList *list = NULL;
- if (order == pe_order_none) {
+ if (flags == pcmk__ar_none) {
return FALSE;
}
@@ -508,23 +471,23 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
+ pcmk__related_action_t *after = gIter->data;
- if (after->action == rh_action && (after->type & order)) {
+ if (after->action == rh_action && (after->type & flags)) {
return FALSE;
}
}
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = rh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = lh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
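
order_actions() records the ordering in both directions: a wrapper naming the "after" action is prepended to lh_action->actions_after, and a mirror wrapper goes on rh_action->actions_before; before allocating, it scans the existing list so a duplicate (same action with overlapping flag bits) is rejected. A stripped-down sketch of the same bookkeeping with GLib lists and a stand-in action type:

    #include <glib.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct action;

    struct related {
        struct action *action;
        guint32 flags;
    };

    struct action {
        const char *id;
        GList *actions_before;  /* of struct related * */
        GList *actions_after;   /* of struct related * */
    };

    static gboolean order(struct action *first, struct action *then,
                          guint32 flags)
    {
        struct related *fwd = NULL;
        struct related *back = NULL;

        if (flags == 0) {
            return FALSE;
        }
        /* Filter duplicates: same target with any overlapping flag bits */
        for (GList *i = first->actions_after; i != NULL; i = i->next) {
            struct related *r = i->data;

            if ((r->action == then) && (r->flags & flags)) {
                return FALSE;
            }
        }
        fwd = calloc(1, sizeof(*fwd));
        fwd->action = then;
        fwd->flags = flags;
        first->actions_after = g_list_prepend(first->actions_after, fwd);

        back = calloc(1, sizeof(*back));
        back->action = first;
        back->flags = flags;
        then->actions_before = g_list_prepend(then->actions_before, back);
        return TRUE;
    }

    int main(void)
    {
        struct action start = { "start", NULL, NULL };
        struct action stop = { "stop", NULL, NULL };

        printf("first add: %d, duplicate add: %d\n",
               order(&start, &stop, 0x1), order(&start, &stop, 0x1));
        return 0;
    }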
@@ -534,7 +497,7 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
void
destroy_ticket(gpointer data)
{
- pe_ticket_t *ticket = data;
+ pcmk_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
@@ -543,23 +506,23 @@ destroy_ticket(gpointer data)
free(ticket);
}
-pe_ticket_t *
-ticket_new(const char *ticket_id, pe_working_set_t * data_set)
+pcmk_ticket_t *
+ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
{
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = calloc(1, sizeof(pe_ticket_t));
+ ticket = calloc(1, sizeof(pcmk_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
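
ticket_new() creates the tickets table lazily on first use, keyed by a duplicated ID with destroy_ticket() as the value destructor, and returns the existing entry when the ID is already known. The same lazy-initialization pattern with plain GLib calls (pcmk__strkey_table() is a Pacemaker wrapper around g_hash_table_new_full(); the payload here is a stand-in string rather than a real ticket):

    #include <glib.h>
    #include <stdio.h>

    static GHashTable *tickets = NULL;

    static void destroy_value(gpointer data)
    {
        g_free(data);
    }

    static gpointer ticket_lookup_or_new(const char *id)
    {
        gpointer ticket = NULL;

        if (tickets == NULL) {  /* create the table on first use */
            tickets = g_hash_table_new_full(g_str_hash, g_str_equal,
                                            g_free, destroy_value);
        }
        ticket = g_hash_table_lookup(tickets, id);
        if (ticket == NULL) {
            ticket = g_strdup_printf("ticket %s", id);  /* stand-in payload */
            g_hash_table_insert(tickets, g_strdup(id), ticket);
        }
        return ticket;
    }

    int main(void)
    {
        gpointer a = ticket_lookup_or_new("ticketA");
        gpointer b = ticket_lookup_or_new("ticketA");

        printf("same entry returned: %d\n", a == b);
        g_hash_table_destroy(tickets);
        return 0;
    }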
@@ -573,55 +536,57 @@ ticket_new(const char *ticket_id, pe_working_set_t * data_set)
ticket->standby = FALSE;
ticket->state = pcmk__strkey_table(free, free);
- g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
+ g_hash_table_insert(scheduler->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
const char *
-rsc_printable_id(const pe_resource_t *rsc)
+rsc_printable_id(const pcmk_resource_t *rsc)
{
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? rsc->id : ID(rsc->xml);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? rsc->id : ID(rsc->xml);
}
void
-pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__clear_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
+pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
{
- for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *r = (pe_resource_t *) lpc->data;
+ for (GList *lpc = scheduler->resources; lpc != NULL; lpc = lpc->next) {
+ pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;
pe__clear_resource_flags_recursive(r, flag);
}
}
void
-pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__set_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
- pe_action_t *dependency, pe_working_set_t *data_set)
+trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
+ pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
- && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
@@ -629,10 +594,11 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
- pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
+ reason, FALSE, scheduler);
if(dependency) {
- order_actions(unfence, dependency, pe_order_optional);
+ order_actions(unfence, dependency, pcmk__ar_ordered);
}
} else if(rsc) {
@@ -641,7 +607,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
- trigger_unfencing(rsc, node, reason, dependency, data_set);
+ trigger_unfencing(rsc, node, reason, dependency, scheduler);
}
}
}
@@ -650,7 +616,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
- pe_tag_t *tag = NULL;
+ pcmk_tag_t *tag = NULL;
GList *gIter = NULL;
gboolean is_existing = FALSE;
@@ -658,7 +624,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
- tag = calloc(1, sizeof(pe_tag_t));
+ tag = calloc(1, sizeof(pcmk_tag_t));
if (tag == NULL) {
return FALSE;
}
@@ -697,7 +663,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
-pe__shutdown_requested(const pe_node_t *node)
+pe__shutdown_requested(const pcmk_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
@@ -706,18 +672,22 @@ pe__shutdown_requested(const pe_node_t *node)
/*!
* \internal
- * \brief Update a data set's "recheck by" time
+ * \brief Update "recheck by" time in scheduler data
*
- * \param[in] recheck Epoch time when recheck should happen
- * \param[in,out] data_set Current working set
+ * \param[in] recheck Epoch time when recheck should happen
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] reason What time is being updated for (for logs)
*/
void
-pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
+pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
+ const char *reason)
{
- if ((recheck > get_effective_time(data_set))
- && ((data_set->recheck_by == 0)
- || (data_set->recheck_by > recheck))) {
- data_set->recheck_by = recheck;
+ if ((recheck > get_effective_time(scheduler))
+ && ((scheduler->recheck_by == 0)
+ || (scheduler->recheck_by > recheck))) {
+ scheduler->recheck_by = recheck;
+ crm_debug("Updated next scheduler recheck to %s for %s",
+ pcmk__trim(ctime(&recheck)), reason);
}
}
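
pe__update_recheck_time() keeps the earliest future recheck time: a candidate wins only if it is later than the effective "now" and earlier than any time already recorded (zero meaning none recorded yet); the new reason argument only feeds the debug log. The comparison in isolation, as a runnable sketch:

    #include <stdio.h>
    #include <time.h>

    static time_t recheck_by = 0;

    /* Record candidate if it is in the future and earlier than the
     * currently scheduled recheck (0 means none scheduled)
     */
    static void update_recheck_time(time_t candidate, time_t now,
                                    const char *reason)
    {
        if ((candidate > now)
            && ((recheck_by == 0) || (recheck_by > candidate))) {
            recheck_by = candidate;
            printf("next recheck %lld for %s\n",
                   (long long) recheck_by, reason);
        }
    }

    int main(void)
    {
        time_t now = time(NULL);

        update_recheck_time(now + 600, now, "rule evaluation");
        update_recheck_time(now + 60, now, "failure timeout");  /* earlier wins */
        update_recheck_time(now + 900, now, "ignored: later");
        return 0;
    }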
@@ -731,28 +701,28 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
- * \param[in,out] data_set Cluster working set containing \p xml_obj
+ * \param[in,out] scheduler Scheduler data containing \p xml_obj
*/
void
pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
- gboolean overwrite, pe_working_set_t *data_set)
+ gboolean overwrite, pcmk_scheduler_t *scheduler)
{
crm_time_t *next_change = crm_time_new_undefined();
- pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
+ pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(recheck, data_set);
+ pe__update_recheck_time(recheck, scheduler, "rule evaluation");
}
crm_time_free(next_change);
}
bool
-pe__resource_is_disabled(const pe_resource_t *rsc)
+pe__resource_is_disabled(const pcmk_resource_t *rsc)
{
const char *target_role = NULL;
@@ -761,10 +731,10 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
- if ((target_role_e == RSC_ROLE_STOPPED)
- || ((target_role_e == RSC_ROLE_UNPROMOTED)
+ if ((target_role_e == pcmk_role_stopped)
+ || ((target_role_e == pcmk_role_unpromoted)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable))) {
+ pcmk_rsc_promotable))) {
return true;
}
}
@@ -781,17 +751,17 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
* \return true if \p rsc is running only on \p node, otherwise false
*/
bool
-pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node)
+pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return (rsc != NULL) && pcmk__list_of_1(rsc->running_on)
- && pe__same_node((const pe_node_t *) rsc->running_on->data, node);
+ && pe__same_node((const pcmk_node_t *) rsc->running_on->data, node);
}
bool
-pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
+pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
{
for (GList *ele = rsc->running_on; ele; ele = ele->next) {
- pe_node_t *node = (pe_node_t *) ele->data;
+ pcmk_node_t *node = (pcmk_node_t *) ele->data;
if (pcmk__str_in_list(node->details->uname, node_list,
pcmk__str_star_matches|pcmk__str_casei)) {
return true;
@@ -802,7 +772,7 @@ pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
}
bool
-pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node)
+pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node));
}
@@ -813,7 +783,7 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
GList *retval = NULL;
for (GList *gIter = rscs; gIter; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
@@ -828,7 +798,8 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
}
GList *
-pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
+pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *nodes = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
@@ -838,7 +809,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
*/
nodes = g_list_prepend(nodes, strdup("*"));
} else {
- pe_node_t *node = pe_find_node(data_set->nodes, s);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
@@ -852,7 +823,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
- nodes = pe__unames_with_tag(data_set, s);
+ nodes = pe__unames_with_tag(scheduler, s);
}
}
@@ -860,14 +831,16 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
}
GList *
-pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
+pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ s, flags);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
@@ -885,7 +858,7 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
* typo or something. See pe__build_node_name_list() for more
* detail.
*/
- resources = pe__rscs_with_tag(data_set, s);
+ resources = pe__rscs_with_tag(scheduler, s);
}
}
@@ -893,12 +866,12 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
}
xmlNode *
-pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name)
+pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
{
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
const char *rsc_id = rsc->id;
- if (parent->variant == pe_clone) {
+ if (parent->variant == pcmk_rsc_variant_clone) {
rsc_id = pe__clone_child_id(parent);
}
diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
deleted file mode 100644
index daa3781..0000000
--- a/lib/pengine/variant.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2004-2022 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#ifndef PE_VARIANT__H
-# define PE_VARIANT__H
-
-# if PE__VARIANT_BUNDLE
-
-typedef struct {
- int offset;
- char *ipaddr;
- pe_node_t *node;
- pe_resource_t *ip;
- pe_resource_t *child;
- pe_resource_t *container;
- pe_resource_t *remote;
-} pe__bundle_replica_t;
-
-enum pe__bundle_mount_flags {
- pe__bundle_mount_none = 0x00,
-
- // mount instance-specific subdirectory rather than source directly
- pe__bundle_mount_subdir = 0x01
-};
-
-typedef struct {
- char *source;
- char *target;
- char *options;
- uint32_t flags; // bitmask of pe__bundle_mount_flags
-} pe__bundle_mount_t;
-
-typedef struct {
- char *source;
- char *target;
-} pe__bundle_port_t;
-
-enum pe__container_agent {
- PE__CONTAINER_AGENT_UNKNOWN,
- PE__CONTAINER_AGENT_DOCKER,
- PE__CONTAINER_AGENT_RKT,
- PE__CONTAINER_AGENT_PODMAN,
-};
-
-#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
-#define PE__CONTAINER_AGENT_DOCKER_S "docker"
-#define PE__CONTAINER_AGENT_RKT_S "rkt"
-#define PE__CONTAINER_AGENT_PODMAN_S "podman"
-
-typedef struct pe__bundle_variant_data_s {
- int promoted_max;
- int nreplicas;
- int nreplicas_per_host;
- char *prefix;
- char *image;
- const char *ip_last;
- char *host_network;
- char *host_netmask;
- char *control_port;
- char *container_network;
- char *ip_range_start;
- gboolean add_host;
- gchar *container_host_options;
- char *container_command;
- char *launcher_options;
- const char *attribute_target;
-
- pe_resource_t *child;
-
- GList *replicas; // pe__bundle_replica_t *
- GList *ports; // pe__bundle_port_t *
- GList *mounts; // pe__bundle_mount_t *
-
- enum pe__container_agent agent_type;
-} pe__bundle_variant_data_t;
-
-# define get_bundle_variant_data(data, rsc) \
- CRM_ASSERT(rsc != NULL); \
- CRM_ASSERT(rsc->variant == pe_container); \
- CRM_ASSERT(rsc->variant_opaque != NULL); \
- data = (pe__bundle_variant_data_t *)rsc->variant_opaque; \
-
-# endif
-
-#endif
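
The removed get_bundle_variant_data macro illustrates the variant pattern used throughout this library: each resource carries an opaque variant_opaque pointer, and accessors assert the variant tag before casting it to the variant-specific struct. A generic, self-contained illustration of that tagged-opaque-pointer pattern (simplified types; the real macro asserts with CRM_ASSERT rather than assert):

    #include <assert.h>
    #include <stdio.h>

    enum variant { VARIANT_NATIVE, VARIANT_BUNDLE };

    struct resource {
        enum variant variant;
        void *variant_opaque;   /* points at variant-specific data */
    };

    struct bundle_data {
        int nreplicas;
    };

    /* Assert the tag before trusting the cast, as the macro did */
    #define GET_BUNDLE_DATA(data, rsc) do {                         \
            assert((rsc) != NULL);                                  \
            assert((rsc)->variant == VARIANT_BUNDLE);               \
            assert((rsc)->variant_opaque != NULL);                  \
            (data) = (struct bundle_data *) (rsc)->variant_opaque;  \
        } while (0)

    int main(void)
    {
        struct bundle_data bd = { 3 };
        struct resource rsc = { VARIANT_BUNDLE, &bd };
        struct bundle_data *data = NULL;

        GET_BUNDLE_DATA(data, &rsc);
        printf("replicas: %d\n", data->nreplicas);
        return 0;
    }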