Diffstat (limited to 'lib/pengine/pe_actions.c')
-rw-r--r--  lib/pengine/pe_actions.c  1303
1 file changed, 744 insertions(+), 559 deletions(-)
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index ed7f0da..aaa6598 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,29 +14,30 @@
#include <crm/crm.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
+#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
-static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms);
+static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms);
static void
-add_singleton(pe_working_set_t *data_set, pe_action_t *action)
+add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
{
- if (data_set->singletons == NULL) {
- data_set->singletons = pcmk__strkey_table(NULL, NULL);
+ if (scheduler->singletons == NULL) {
+ scheduler->singletons = pcmk__strkey_table(NULL, NULL);
}
- g_hash_table_insert(data_set->singletons, action->uuid, action);
+ g_hash_table_insert(scheduler->singletons, action->uuid, action);
}
-static pe_action_t *
-lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
+static pcmk_action_t *
+lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
{
- if (data_set->singletons == NULL) {
+ if (scheduler->singletons == NULL) {
return NULL;
}
- return g_hash_table_lookup(data_set->singletons, action_uuid);
+ return g_hash_table_lookup(scheduler->singletons, action_uuid);
}
/*!
@@ -46,21 +47,21 @@ lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Existing action that matches arguments (or NULL if none)
*/
-static pe_action_t *
-find_existing_action(const char *key, const pe_resource_t *rsc,
- const pe_node_t *node, const pe_working_set_t *data_set)
+static pcmk_action_t *
+find_existing_action(const char *key, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
{
GList *matches = NULL;
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- /* When rsc is NULL, it would be quicker to check data_set->singletons,
- * but checking all data_set->actions takes the node into account.
+ /* When rsc is NULL, it would be quicker to check scheduler->singletons,
+ * but checking all scheduler->actions takes the node into account.
*/
- matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
+ matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
@@ -72,79 +73,78 @@ find_existing_action(const char *key, const pe_resource_t *rsc,
return action;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration corresponding to a specific action key
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
static xmlNode *
-find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
- gboolean include_disabled)
+find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- guint interval_ms = 0;
- gboolean do_retry = TRUE;
- char *local_key = NULL;
- const char *name = NULL;
- const char *interval_spec = NULL;
- char *match_key = NULL;
- xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- retry:
- for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
- operation = pcmk__xe_next(operation)) {
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
+ bool enabled = false;
+ const char *config_name = NULL;
+ const char *interval_spec = NULL;
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
-
- interval_ms = crm_parse_interval_spec(interval_spec);
- match_key = pcmk__op_key(rsc->id, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
-
- if (rsc->clone_name) {
- match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
- }
-
- if (op != NULL) {
- free(local_key);
- return op;
- }
+ // @TODO This does not consider rules, defaults, etc.
+ if (!include_disabled
+ && (pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
}
- }
-
- free(local_key);
- if (do_retry == FALSE) {
- return NULL;
- }
- do_retry = FALSE;
- if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
- local_key = pcmk__op_key(rsc->id, "migrate", 0);
- key = local_key;
- goto retry;
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) != interval_ms) {
+ continue;
+ }
- } else if (strstr(key, "_notify_")) {
- local_key = pcmk__op_key(rsc->id, "notify", 0);
- key = local_key;
- goto retry;
+ config_name = crm_element_value(operation, "name");
+ if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
+ return operation;
+ }
}
-
return NULL;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration of a resource action
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
xmlNode *
-find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
+pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- return find_rsc_op_entry_helper(rsc, key, FALSE);
+ xmlNode *action_config = NULL;
+
+ // Try requested action first
+ action_config = find_exact_action_config(rsc, action_name, interval_ms,
+ include_disabled);
+
+ // For migrate_to and migrate_from actions, retry with "migrate"
+ // @TODO This should be either documented or deprecated
+ if ((action_config == NULL)
+ && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ action_config = find_exact_action_config(rsc, "migrate", 0,
+ include_disabled);
+ }
+
+ return action_config;
}
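
As a usage sketch (not part of the patch), a caller could fetch a specific operation's XML with the new helper; the wrapper name and the 10-second interval below are made up for illustration, and the headers already included by this file are assumed:

static xmlNode *
example_find_10s_monitor_config(const pcmk_resource_t *rsc)
{
    /* Look up the <op> block configured for a 10s-interval monitor,
     * skipping disabled operations (include_disabled = false)
     */
    return pcmk__find_action_config(rsc, PCMK_ACTION_MONITOR,
                                    10000 /* ms */, false);
}
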
/*!
@@ -156,98 +156,106 @@ find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
-static pe_action_t *
-new_action(char *key, const char *task, pe_resource_t *rsc,
- const pe_node_t *node, bool optional, bool for_graph,
- pe_working_set_t *data_set)
+static pcmk_action_t *
+new_action(char *key, const char *task, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = calloc(1, sizeof(pe_action_t));
+ pcmk_action_t *action = calloc(1, sizeof(pcmk_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
- action->extra = pcmk__strkey_table(free, free);
- action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
- if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
- pe__set_action_flags(action, pe_action_dc);
+ pe__set_action_flags(action, pcmk_action_on_dc);
}
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
if (optional) {
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
} else {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
- if (rsc != NULL) {
+ if (rsc == NULL) {
+ action->meta = pcmk__strkey_table(free, free);
+ } else {
guint interval_ms = 0;
- action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set,
- interval_ms);
+ action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
+ true);
+
+ /* If the given key is for one of the many notification pseudo-actions
+ * (pre_notify_promote, etc.), the actual action name is "notify"
+ */
+ if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
+ action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
+ 0, true);
+ }
+
+ unpack_operation(action, action->op_entry, interval_ms);
}
- if (for_graph) {
- pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
- (optional? "optional" : "required"),
- data_set->action_id, key, task,
- ((rsc == NULL)? "no resource" : rsc->id),
- pe__node_name(node));
- action->id = data_set->action_id++;
+ pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
+ (optional? "optional" : "required"),
+ scheduler->action_id, key, task,
+ ((rsc == NULL)? "no resource" : rsc->id),
+ pe__node_name(node));
+ action->id = scheduler->action_id++;
- data_set->actions = g_list_prepend(data_set->actions, action);
- if (rsc == NULL) {
- add_singleton(data_set, action);
- } else {
- rsc->actions = g_list_prepend(rsc->actions, action);
- }
+ scheduler->actions = g_list_prepend(scheduler->actions, action);
+ if (rsc == NULL) {
+ add_singleton(scheduler, action);
+ } else {
+ rsc->actions = g_list_prepend(rsc->actions, action);
}
return action;
}
/*!
* \internal
- * \brief Evaluate node attribute values for an action
+ * \brief Unpack a resource's action-specific instance parameters
*
- * \param[in,out] action Action to unpack attributes for
- * \param[in,out] data_set Cluster working set
+ * \param[in] action_xml XML of action's configuration in CIB (if any)
+ * \param[in,out] node_attrs Table of node attributes (for rule evaluation)
+ * \param[in,out] scheduler Cluster working set (for rule evaluation)
+ *
+ * \return Newly allocated hash table of action-specific instance parameters
*/
-static void
-unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
+GHashTable *
+pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
+ GHashTable *node_attrs,
+ pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
- && (action->op_entry != NULL)) {
-
- pe_rule_eval_data_t rule_data = {
- .node_hash = action->node->details->attrs,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
- .match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
- };
-
- pe__set_action_flags(action, pe_action_have_node_attrs);
- pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
- &rule_data, action->extra, NULL,
- FALSE, data_set);
- }
+ GHashTable *params = pcmk__strkey_table(free, free);
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_attrs,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe__unpack_dataset_nvpairs(action_xml, XML_TAG_ATTR_SETS,
+ &rule_data, params, NULL,
+ FALSE, scheduler);
+ return params;
}
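
A minimal sketch (hypothetical wrapper, not from the patch) of how a caller might evaluate an action's instance attributes against its assigned node, the same way custom_action() does later in this patch:

static GHashTable *
example_eval_op_instance_attrs(const pcmk_action_t *action,
                               pcmk_scheduler_t *scheduler)
{
    /* Evaluate <instance_attributes> inside the action's <op> XML, using the
     * assigned node's attributes for rule evaluation; the caller owns the
     * returned table and must free it with g_hash_table_destroy()
     */
    return pcmk__unpack_action_rsc_params(action->op_entry,
                                          action->node->details->attrs,
                                          scheduler);
}
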
/*!
@@ -258,46 +266,46 @@ unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
* \param[in] optional Requested optional status
*/
static void
-update_action_optional(pe_action_t *action, gboolean optional)
+update_action_optional(pcmk_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
}
static enum pe_quorum_policy
-effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
+effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- enum pe_quorum_policy policy = data_set->no_quorum_policy;
+ enum pe_quorum_policy policy = scheduler->no_quorum_policy;
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- policy = no_quorum_ignore;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ policy = pcmk_no_quorum_ignore;
- } else if (data_set->no_quorum_policy == no_quorum_demote) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
- case RSC_ROLE_PROMOTED:
- case RSC_ROLE_UNPROMOTED:
- if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
+ case pcmk_role_promoted:
+ case pcmk_role_unpromoted:
+ if (rsc->next_role > pcmk_role_unpromoted) {
+ pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
- policy = no_quorum_ignore;
+ policy = pcmk_no_quorum_ignore;
break;
default:
- policy = no_quorum_stop;
+ policy = pcmk_no_quorum_stop;
break;
}
}
@@ -309,50 +317,47 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
-update_resource_action_runnable(pe_action_t *action, bool for_graph,
- pe_working_set_t *data_set)
+update_resource_action_runnable(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
- "%s on %s is unrunnable (node is offline)",
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && for_graph
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
- pe_fence_node(data_set, action->node, "stop is unrunnable", false);
+ pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
}
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& action->node->details->pending) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING,
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
- } else if (action->needs == rsc_req_nothing) {
+ } else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
- && !pe_can_fence(data_set, action->node)) {
+ && !pe_can_fence(scheduler, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
@@ -361,37 +366,37 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
} else {
- switch (effective_quorum_policy(action->rsc, data_set)) {
- case no_quorum_stop:
+ switch (effective_quorum_policy(action->rsc, scheduler)) {
+ case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
- case no_quorum_freeze:
+ case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
break;
}
}
@@ -405,19 +410,20 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
* \param[in] action New action
*/
static void
-update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
+update_resource_flags_for_action(pcmk_resource_t *rsc,
+ const pcmk_action_t *action)
{
- /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
- * within Pacemaker, and should be deprecated and eventually removed
+ /* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
+ * within Pacemaker, and will eventually be removed
*/
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_stopping);
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_stopping);
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
- pe__set_resource_flags(rsc, pe_rsc_starting);
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
- pe__clear_resource_flags(rsc, pe_rsc_starting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
@@ -428,80 +434,121 @@ valid_stop_on_fail(const char *value)
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
-static const char *
-unpack_operation_on_fail(pe_action_t * action)
+/*!
+ * \internal
+ * \brief Validate (and possibly reset) resource action's on_fail meta-attribute
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] action_config Action configuration XML from CIB (if any)
+ * \param[in,out] meta Table of action meta-attributes
+ */
+static void
+validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
- const char *on_fail = NULL;
const char *interval_spec = NULL;
- const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ char *key = NULL;
+ char *new_value = NULL;
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ // Stop actions can only use certain on-fail values
+ if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
- "allowed for stop", action->rsc->id, value);
- return NULL;
-
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
- // demote on_fail defaults to monitor value for promoted role if present
- xmlNode *operation = NULL;
+ "allowed for stop", rsc->id, value);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
+ }
- CRM_CHECK(action->rsc != NULL, return NULL);
+ /* Demote actions default on-fail to the on-fail value for the first
+ * recurring monitor for the promoted role (if any).
+ */
+ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && (value == NULL)) {
- for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
- (operation != NULL) && (value == NULL);
- operation = pcmk__xe_next(operation)) {
+ /* @TODO This does not consider promote options set in a meta-attribute
+ * block (which may have rules that need to be evaluated) rather than
+ * XML properties.
+ */
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
+ const char *promote_on_fail = NULL;
- if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ /* We only care about explicit on-fail (if promote uses default, so
+ * can demote)
+ */
+ promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ if (promote_on_fail == NULL) {
continue;
}
+
+ // We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
- on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!on_fail) {
- continue;
- } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
- } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S,
- NULL)) {
+ }
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
- } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ }
+
+ // We only care about enabled monitors
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
continue;
- } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ }
+
+ // Demote actions can't default to on-fail="demote"
+ if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
- value = on_fail;
+ // Use value from first applicable promote action found
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup(promote_on_fail);
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
}
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
- value = "ignore";
+ return;
+ }
- } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- name = crm_element_value(action->op_entry, "name");
- role = crm_element_value(action->op_entry, "role");
- interval_spec = crm_element_value(action->op_entry,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
+ && !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup("ignore");
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return;
+ }
+
+ // on-fail="demote" is allowed only for certain actions
+ if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action_config, "name");
+ role = crm_element_value(action_config, "role");
+ interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
- && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)
+ if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
- "allowed for it", action->rsc->id, name);
- return NULL;
+ "allowed for it", rsc->id, name);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
}
}
-
- return value;
}
static int
@@ -510,7 +557,7 @@ unpack_timeout(const char *value)
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
@@ -579,346 +626,475 @@ unpack_start_delay(const char *value, GHashTable *meta)
return start_delay;
}
+/*!
+ * \internal
+ * \brief Find a resource's most frequent recurring monitor
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Operation XML configured for most frequent recurring monitor for
+ * \p rsc (if any)
+ */
static xmlNode *
-find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
+most_frequent_monitor(const pcmk_resource_t *rsc)
{
- guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
- const char *name = NULL;
- const char *interval_spec = NULL;
xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- for (operation = pcmk__xe_first_child(rsc->ops_xml);
- operation != NULL;
- operation = pcmk__xe_next(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
-
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
+ bool enabled = false;
+ guint interval_ms = 0;
+ const char *interval_spec = crm_element_value(operation,
+ XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
- continue;
- }
+ // We only care about enabled recurring monitors
+ if (!pcmk__str_eq(crm_element_value(operation, "name"),
+ PCMK_ACTION_MONITOR, pcmk__str_none)) {
+ continue;
+ }
+ interval_ms = crm_parse_interval_spec(interval_spec);
+ if (interval_ms == 0) {
+ continue;
+ }
- interval_ms = crm_parse_interval_spec(interval_spec);
+ // @TODO This does not account for rules, defaults, etc.
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
+ }
- if (interval_ms && (interval_ms < min_interval_ms)) {
- min_interval_ms = interval_ms;
- op = operation;
- }
+ if (interval_ms < min_interval_ms) {
+ min_interval_ms = interval_ms;
+ op = operation;
}
}
-
return op;
}
/*!
- * \brief Unpack operation XML into an action structure
+ * \internal
+ * \brief Unpack action meta-attributes
*
- * Unpack an operation's meta-attributes (normalizing the interval, timeout,
- * and start delay values as integer milliseconds), requirements, and
- * failure policy.
+ * \param[in,out] rsc Resource that action is for
+ * \param[in] node Node that action is on
+ * \param[in] action_name Action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] action_config Action XML configuration from CIB (if any)
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in,out] data_set Cluster state
- * \param[in] interval_ms How frequently to perform the operation
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds) from its CIB XML
+ * configuration (including defaults).
+ *
+ * \return Newly allocated hash table with normalized action meta-attributes
*/
-static void
-unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms)
+GHashTable *
+pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config)
{
- int timeout_ms = 0;
- const char *value = NULL;
- bool is_probe = false;
+ GHashTable *meta = NULL;
+ char *name = NULL;
+ char *value = NULL;
+ const char *timeout_spec = NULL;
+ const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
- .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
- .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
- .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
- .op_name = action->task,
- .interval = interval_ms
+ .op_name = action_name,
+ .interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .node_hash = (node == NULL)? NULL : node->details->attrs,
+ .role = pcmk_role_unknown,
+ .now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
- .op_data = &op_rule_data
+ .op_data = &op_rule_data,
};
- CRM_CHECK(action && action->rsc, return);
-
- is_probe = pcmk_is_probe(action->task, interval_ms);
+ meta = pcmk__strkey_table(free, free);
// Cluster-wide <op_defaults> <meta_attributes>
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, FALSE, data_set);
-
- // Determine probe default timeout differently
- if (is_probe) {
- xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
-
- if (min_interval_mon) {
- value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
- if (value) {
- crm_trace("\t%s: Setting default timeout to minimum-interval "
- "monitor's timeout '%s'", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
+ &rule_data, meta, NULL, FALSE, rsc->cluster);
+
+ // Derive default timeout for probes from recurring monitor timeouts
+ if (pcmk_is_probe(action_name, interval_ms)) {
+ xmlNode *min_interval_mon = most_frequent_monitor(rsc);
+
+ if (min_interval_mon != NULL) {
+ /* @TODO This does not consider timeouts set in meta_attributes
+ * blocks (which may also have rules that need to be evaluated).
+ */
+ timeout_spec = crm_element_value(min_interval_mon,
+ XML_ATTR_TIMEOUT);
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting default timeout for %s probe to "
+ "most frequent monitor's timeout '%s'",
+ rsc->id, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
}
- if (xml_obj) {
- xmlAttrPtr xIter = NULL;
-
+ if (action_config != NULL) {
// <op> <meta_attributes> take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE, data_set);
+ pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
+ meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
+ * (See below for the only exception, fence device start/probe timeout.)
*/
- for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
- const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_obj, prop_name);
+ for (xmlAttrPtr attr = action_config->properties;
+ attr != NULL; attr = attr->next) {
+ name = strdup((const char *) attr->name);
+ value = strdup(pcmk__xml_attr_value(attr));
- g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
- g_hash_table_remove(action->meta, "id");
+ g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
- g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
- crm_strdup_printf("%u", interval_ms));
+ name = strdup(XML_LRM_ATTR_INTERVAL);
+ CRM_ASSERT(name != NULL);
+ value = crm_strdup_printf("%u", interval_ms);
+ g_hash_table_insert(meta, name, value);
} else {
- g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
- }
-
- /*
- * Timeout order of precedence:
- * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
- * and task is start or a probe; pcmk_monitor_timeout works
- * by default for a recurring monitor)
- * 2. explicit op timeout on the primitive
- * 3. default op timeout
- * a. if probe, then min-interval monitor's timeout
- * b. else, in XML_CIB_TAG_OPCONFIG
- * 4. CRM_DEFAULT_OP_TIMEOUT_S
- *
- * #1 overrides general rule of <op> XML property having highest
- * precedence.
+ g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /* Timeout order of precedence (highest to lowest):
+ * 1. pcmk_monitor_timeout resource parameter (only for starts and probes
+ * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
+ * monitors via the executor instead)
+ * 2. timeout configured in <op> (with <op timeout> taking precedence over
+ * <op> <meta_attributes>)
+ * 3. timeout configured in <op_defaults> <meta_attributes>
+ * 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*/
+
+ // Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
- && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- || is_probe)) {
-
- GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+ && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
+ || pcmk_is_probe(action_name, interval_ms))) {
+
+ GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
+
+ timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting timeout for %s %s to "
+ "pcmk_monitor_timeout (%s)",
+ rsc->id, action_name, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
+ }
+ }
- value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ // Normalize timeout to positive milliseconds
+ name = strdup(XML_ATTR_TIMEOUT);
+ CRM_ASSERT(name != NULL);
+ timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
+
+ // Ensure on-fail has a valid value
+ validate_on_fail(rsc, action_name, action_config, meta);
+
+ // Normalize start-delay
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
+ if (str != NULL) {
+ unpack_start_delay(str, meta);
+ } else {
+ long long start_delay = 0;
- if (value) {
- crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
- "overriding default", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(str, action_config, interval_ms,
+ rsc->cluster->now, &start_delay)) {
+ name = strdup(XML_OP_ATTR_START_DELAY);
+ CRM_ASSERT(name != NULL);
+ g_hash_table_insert(meta, name,
+ crm_strdup_printf("%lld", start_delay));
}
}
+ return meta;
+}
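
A sketch of reading the merged, normalized timeout back out of the table returned above; the wrapper name and the 10-second monitor interval are hypothetical, and crm_get_msec() is used the same way as in unpack_timeout() earlier in this file:

static int
example_monitor_timeout_ms(pcmk_resource_t *rsc, const pcmk_node_t *node,
                           const xmlNode *op_xml)
{
    int timeout_ms = 0;

    /* op_defaults, the <op> block, and (for fence devices)
     * pcmk_monitor_timeout have already been merged by the precedence rules
     * documented above, and XML_ATTR_TIMEOUT holds a normalized millisecond
     * value
     */
    GHashTable *meta = pcmk__unpack_action_meta(rsc, node, PCMK_ACTION_MONITOR,
                                                10000 /* ms */, op_xml);

    timeout_ms = crm_get_msec(g_hash_table_lookup(meta, XML_ATTR_TIMEOUT));
    g_hash_table_destroy(meta);
    return timeout_ms;
}
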
- // Normalize timeout to positive milliseconds
- value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
- timeout_ms = unpack_timeout(value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- pcmk__itoa(timeout_ms));
+/*!
+ * \internal
+ * \brief Determine an action's quorum and fencing dependency
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action being unpacked
+ *
+ * \return Quorum and fencing dependency appropriate to action
+ */
+enum rsc_start_requirement
+pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
+{
+ const char *value = NULL;
+ enum rsc_start_requirement requires = pcmk_requires_nothing;
+
+ CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
- if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
- action->needs = rsc_req_nothing;
+ if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
value = "nothing (not start or promote)";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
- action->needs = rsc_req_stonith;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
+ requires = pcmk_requires_fencing;
value = "fencing";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
- action->needs = rsc_req_quorum;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_quorum)) {
+ requires = pcmk_requires_quorum;
value = "quorum";
} else {
- action->needs = rsc_req_nothing;
value = "nothing";
}
- pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
+ pe_rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
+ return requires;
+}
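
For illustration only, the call as a consumer would make it (hypothetical wrapper; this mirrors how unpack_operation() below fills action->needs):

static void
example_set_requirements(pcmk_action_t *action)
{
    /* Starts and promotes of resources flagged pcmk_rsc_needs_fencing or
     * pcmk_rsc_needs_quorum depend on fencing or quorum respectively;
     * every other action requires nothing
     */
    action->needs = pcmk__action_requires(action->rsc, action->task);
}
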
- value = unpack_operation_on_fail(action);
+/*!
+ * \internal
+ * \brief Parse action failure response from a user-provided string
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] value User-provided configuration value for on-fail
+ *
+ * \return Action failure response parsed from \p value
+ */
+enum action_fail_response
+pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, const char *value)
+{
+ const char *desc = NULL;
+ bool needs_remote_reset = false;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
if (value == NULL) {
+ // Use default
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- action->on_fail = action_fail_block;
- g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
- value = "block"; // The above could destroy the original string
+ on_fail = pcmk_on_fail_block;
+ desc = "block";
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
- action->on_fail = action_fail_fence;
- value = "node fencing";
-
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "node fencing";
+ } else {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
- "operation '%s' to 'stop' because 'fence' is not "
- "valid when fencing is disabled", action->uuid);
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ "%s of %s to 'stop' because 'fence' is not "
+ "valid when fencing is disabled",
+ action_name, rsc->id);
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
- action->on_fail = action_fail_standby;
- value = "node standby";
+ on_fail = pcmk_on_fail_standby_node;
+ desc = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
- action->on_fail = action_fail_ignore;
- value = "ignore";
+ desc = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
- action->on_fail = action_fail_migrate;
- value = "force migration";
+ on_fail = pcmk_on_fail_ban;
+ desc = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate)";
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
- if (container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate)";
-
+ if (rsc->container == NULL) {
+ pe_rsc_debug(rsc,
+ "Using default " XML_OP_ATTR_ON_FAIL
+ " for %s of %s because it does not have a container",
+ action_name, rsc->id);
} else {
- value = NULL;
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate)";
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- action->on_fail = action_fail_demote;
- value = "demote instance";
+ on_fail = pcmk_on_fail_demote;
+ desc = "demote instance";
} else {
- pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
- value = NULL;
+ pcmk__config_err("Using default '" XML_OP_ATTR_ON_FAIL "' for "
+ "%s of %s because '%s' is not valid",
+ action_name, rsc->id, value);
}
- /* defaults */
- if (value == NULL && container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate) (default)";
+ /* Remote node connections are handled specially. Failures that result
+ * in dropping an active connection must result in fencing. The only
+ * failures that don't are probes and starts. The user can explicitly set
+ * on-fail="fence" to fence after start failures.
+ */
+ if (pe__resource_is_remote_conn(rsc)
+ && !pcmk_is_probe(action_name, interval_ms)
+ && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
+ needs_remote_reset = true;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ desc = NULL; // Force default for unmanaged connections
+ }
+ }
- /* For remote nodes, ensure that any failure that results in dropping an
- * active connection to the node results in fencing of the node.
- *
- * There are only two action failures that don't result in fencing.
- * 1. probes - probe failures are expected.
- * 2. start - a start failure indicates that an active connection does not already
- * exist. The user can set op on-fail=fence if they really want to fence start
- * failures. */
- } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
- && pe__resource_is_remote_conn(action->rsc, data_set)
- && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
- && (interval_ms == 0))
- && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
-
- if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop unmanaged remote node (enforcing default)";
+ if (desc != NULL) {
+ // Explicit value used, default not needed
- } else {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- value = "fence remote node (default)";
- } else {
- value = "recover remote node connection (default)";
- }
+ } else if (rsc->container != NULL) {
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate) (default)";
- if (action->rsc->remote_reconnect_ms) {
- action->fail_role = RSC_ROLE_STOPPED;
+ } else if (needs_remote_reset) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
+ desc = "fence remote node (default)";
+ } else {
+ desc = "recover remote node connection (default)";
}
- action->on_fail = action_fail_reset_remote;
+ on_fail = pcmk_on_fail_reset_remote;
+ } else {
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop unmanaged remote node (enforcing default)";
}
- } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- action->on_fail = action_fail_fence;
- value = "resource fence (default)";
-
+ } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "resource fence (default)";
} else {
- action->on_fail = action_fail_block;
- value = "resource block (default)";
+ on_fail = pcmk_on_fail_block;
+ desc = "resource block (default)";
}
- } else if (value == NULL) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate) (default)";
+ } else {
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate) (default)";
}
- pe_rsc_trace(action->rsc, "%s failure handling: %s",
- action->uuid, value);
+ pe_rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
+ pcmk__readable_interval(interval_ms), action_name,
+ rsc->id, desc);
+ return on_fail;
+}
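
A sketch of the call sequence used by unpack_operation() below, shown here for a stop action; the wrapper name is hypothetical:

static enum action_fail_response
example_stop_failure_handling(const pcmk_resource_t *rsc, GHashTable *meta)
{
    /* A NULL value (no explicit on-fail) falls through to the defaults
     * described above: restart the container for bundled resources, fence or
     * block for stops, reset for remote connections, otherwise restart
     */
    const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);

    return pcmk__parse_on_fail(rsc, PCMK_ACTION_STOP, 0 /* interval */, value);
}
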
- value = NULL;
- if (xml_obj != NULL) {
- value = g_hash_table_lookup(action->meta, "role_after_failure");
- if (value) {
- pe_warn_once(pe_wo_role_after,
- "Support for role_after_failure is deprecated and will be removed in a future release");
- }
+/*!
+ * \internal
+ * \brief Determine a resource's role after failure of an action
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] on_fail Failure handling for action
+ * \param[in] meta Unpacked action meta-attributes
+ *
+ * \return Resource role that results from failure of action
+ */
+enum rsc_role_e
+pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
+ enum action_fail_response on_fail, GHashTable *meta)
+{
+ const char *value = NULL;
+ enum rsc_role_e role = pcmk_role_unknown;
+
+ // Set default for role after failure specially in certain circumstances
+ switch (on_fail) {
+ case pcmk_on_fail_stop:
+ role = pcmk_role_stopped;
+ break;
+
+ case pcmk_on_fail_reset_remote:
+ if (rsc->remote_reconnect_ms != 0) {
+ role = pcmk_role_stopped;
+ }
+ break;
+
+ default:
+ break;
}
- if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
- action->fail_role = text2role(value);
+
+ // @COMPAT Check for explicitly configured role (deprecated)
+ value = g_hash_table_lookup(meta, "role_after_failure");
+ if (value != NULL) {
+ pe_warn_once(pcmk__wo_role_after,
+ "Support for role_after_failure is deprecated "
+ "and will be removed in a future release");
+ if (role == pcmk_role_unknown) {
+ role = text2role(value);
+ }
}
- /* defaults */
- if (action->fail_role == RSC_ROLE_UNKNOWN) {
- if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- action->fail_role = RSC_ROLE_UNPROMOTED;
+
+ if (role == pcmk_role_unknown) {
+ // Use default
+ if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ role = pcmk_role_unpromoted;
} else {
- action->fail_role = RSC_ROLE_STARTED;
+ role = pcmk_role_started;
}
}
- pe_rsc_trace(action->rsc, "%s failure results in: %s",
- action->uuid, role2text(action->fail_role));
+ pe_rsc_trace(rsc, "Role after %s %s failure is: %s",
+ rsc->id, action_name, role2text(role));
+ return role;
+}
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
- if (value) {
- unpack_start_delay(value, action->meta);
- } else {
- long long start_delay = 0;
+/*!
+ * \internal
+ * \brief Unpack action configuration
+ *
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
+ *
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms)
+{
+ const char *value = NULL;
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
- &start_delay)) {
- g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
- crm_strdup_printf("%lld", start_delay));
- }
- }
+ action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
+ action->task, interval_ms, xml_obj);
+ action->needs = pcmk__action_requires(action->rsc, action->task);
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
+ interval_ms, value);
+
+ action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
+ action->on_fail, action->meta);
}
/*!
@@ -929,31 +1105,26 @@ unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] save_action Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
- * \return Action object corresponding to arguments
- * \note This function takes ownership of (and might free) \p key. If
- * \p save_action is true, \p data_set will own the returned action,
- * otherwise it is the caller's responsibility to free the return value
- * with pe_free_action().
+ * \return Action object corresponding to arguments (guaranteed not to be
+ * \c NULL)
+ * \note This function takes ownership of (and might free) \p key, and
+ * \p scheduler takes ownership of the returned action (the caller should
+ * not free it).
*/
-pe_action_t *
-custom_action(pe_resource_t *rsc, char *key, const char *task,
- const pe_node_t *on_node, gboolean optional, gboolean save_action,
- pe_working_set_t *data_set)
+pcmk_action_t *
+custom_action(pcmk_resource_t *rsc, char *key, const char *task,
+ const pcmk_node_t *on_node, gboolean optional,
+ pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
-
- if (save_action) {
- action = find_existing_action(key, rsc, on_node, data_set);
- }
+ CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
+ action = find_existing_action(key, rsc, on_node, scheduler);
if (action == NULL) {
- action = new_action(key, task, rsc, on_node, optional, save_action,
- data_set);
+ action = new_action(key, task, rsc, on_node, optional, scheduler);
} else {
free(key);
}
@@ -961,28 +1132,38 @@ custom_action(pe_resource_t *rsc, char *key, const char *task,
update_action_optional(action, optional);
if (rsc != NULL) {
- if (action->node != NULL) {
- unpack_action_node_attributes(action, data_set);
- }
+ if ((action->node != NULL) && (action->op_entry != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
- update_resource_action_runnable(action, save_action, data_set);
+ GHashTable *attrs = action->node->details->attrs;
- if (save_action) {
- update_resource_flags_for_action(rsc, action);
+ if (action->extra != NULL) {
+ g_hash_table_destroy(action->extra);
+ }
+ action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
+ attrs, scheduler);
+ pe__set_action_flags(action, pcmk_action_attrs_evaluated);
}
+
+ update_resource_action_runnable(action, scheduler);
+ update_resource_flags_for_action(rsc, action);
+ }
+
+ if (action->extra == NULL) {
+ action->extra = pcmk__strkey_table(free, free);
}
return action;
}
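
A sketch of a call with the updated signature (save_action is gone and the scheduler always owns the result); the wrapper name is hypothetical, while pcmk__op_key() is the key builder already used earlier in this file:

static pcmk_action_t *
example_schedule_start(pcmk_resource_t *rsc, const pcmk_node_t *node,
                       pcmk_scheduler_t *scheduler)
{
    /* custom_action() takes ownership of the key and returns either an
     * existing matching action or a newly created one
     */
    return custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
                         PCMK_ACTION_START, node, FALSE, scheduler);
}
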
-pe_action_t *
-get_pseudo_op(const char *name, pe_working_set_t * data_set)
+pcmk_action_t *
+get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
{
- pe_action_t *op = lookup_singleton(data_set, name);
+ pcmk_action_t *op = lookup_singleton(scheduler, name);
if (op == NULL) {
- op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
}
return op;
}
@@ -991,15 +1172,15 @@ static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *candidate = gIter->data;
+ pcmk_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
- } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
+ } else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
- } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
+ } else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
@@ -1013,8 +1194,8 @@ find_unfencing_devices(GList *candidates, GList *matches)
}
static int
-node_priority_fencing_delay(const pe_node_t *node,
- const pe_working_set_t *data_set)
+node_priority_fencing_delay(const pcmk_node_t *node,
+ const pcmk_scheduler_t *scheduler)
{
int member_count = 0;
int online_count = 0;
@@ -1023,13 +1204,13 @@ node_priority_fencing_delay(const pe_node_t *node,
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
- if (data_set->priority_fencing_delay <= 0) {
+ if (scheduler->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
- if (node->details->type != node_member) {
+ if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
@@ -1038,10 +1219,10 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = gIter->data;
+ for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *n = gIter->data;
- if (n->details->type != node_member) {
+ if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
@@ -1077,54 +1258,58 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- return data_set->priority_fencing_delay;
+ return scheduler->priority_fencing_delay;
}
-pe_action_t *
-pe_fence_op(pe_node_t *node, const char *op, bool optional,
- const char *reason, bool priority_delay, pe_working_set_t *data_set)
+pcmk_action_t *
+pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay,
+ pcmk_scheduler_t *scheduler)
{
char *op_key = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_action_t *stonith_op = NULL;
if(op == NULL) {
- op = data_set->stonith_action;
+ op = scheduler->stonith_action;
}
- op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
+ op_key = crm_strdup_printf("%s-%s-%s",
+ PCMK_ACTION_STONITH, node->details->uname, op);
- stonith_op = lookup_singleton(data_set, op_key);
+ stonith_op = lookup_singleton(scheduler, op_key);
if(stonith_op == NULL) {
- stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
+ stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
+ TRUE, scheduler);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
- if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect changes to fence device definitions */
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
- GList *matches = find_unfencing_devices(data_set->resources, NULL);
+ GList *matches = find_unfencing_devices(scheduler->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *match = gIter->data;
+ pcmk_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
- data = pe__compare_fencing_digest(match, agent, node, data_set);
- if(data->rc == RSC_DIGEST_ALL) {
+ data = pe__compare_fencing_digest(match, agent, node,
+ scheduler);
+ if (data->rc == pcmk__digest_mismatch) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out,
"notice: Unfencing node %s because the "
@@ -1157,7 +1342,7 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
free(op_key);
}
- if (data_set->priority_fencing_delay > 0
+ if (scheduler->priority_fencing_delay > 0
/* This is a case where `priority-fencing-delay` applies, so at least
 * add the `priority-fencing-delay` field as an indicator. */
@@ -1174,15 +1359,16 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
* the target node, so that it takes precedence over any possible
* `pcmk_delay_base`/`pcmk_delay_max`.
*/
- char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
+ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
+ scheduler));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
- if(optional == FALSE && pe_can_fence(data_set, node)) {
- pe__clear_action_flags(stonith_op, pe_action_optional);
+ if(optional == FALSE && pe_can_fence(scheduler, node)) {
+ pe__clear_action_flags(stonith_op, pcmk_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
@@ -1193,13 +1379,13 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
}
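Editorial sketch (not part of the patch): a typical request for a fencing action with the updated signature; the reason string and the use of the cluster-wide default action (op == NULL) are illustrative.

    pcmk_action_t *fencing = pe_fence_op(node, NULL /* default stonith action */,
                                         false /* required */, "node is unclean",
                                         false /* no priority-delay hint */,
                                         scheduler);

    crm_trace("Fencing of %s is %s", node->details->uname,
              pcmk_is_set(fencing->flags, pcmk_action_optional)? "optional" : "required");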
void
-pe_free_action(pe_action_t * action)
+pe_free_action(pcmk_action_t *action)
{
if (action == NULL) {
return;
}
- g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
- g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
+ g_list_free_full(action->actions_before, free);
+ g_list_free_full(action->actions_after, free);
if (action->extra) {
g_hash_table_destroy(action->extra);
}
@@ -1215,7 +1401,8 @@ pe_free_action(pe_action_t * action)
}
int
-pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
+pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
@@ -1224,8 +1411,8 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -1240,10 +1427,11 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
}
- if (timeout_spec == NULL && data_set->op_defaults) {
+ if (timeout_spec == NULL && scheduler->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
- &rule_data, action_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->op_defaults, XML_TAG_META_SETS,
+ &rule_data, action_meta, NULL, FALSE,
+ scheduler);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
@@ -1252,7 +1440,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
@@ -1262,16 +1450,16 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
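Editorial sketch (not part of the patch): callers get the effective timeout in milliseconds, falling back first to op_defaults and finally to PCMK_DEFAULT_ACTION_TIMEOUT_MS; the "start" action name is illustrative.

    int timeout_ms = pe_get_configured_timeout(rsc, "start", scheduler);

    crm_trace("Effective start timeout for %s is %dms", rsc->id, timeout_ms);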
enum action_tasks
-get_complex_task(const pe_resource_t *rsc, const char *name)
+get_complex_task(const pcmk_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
- if ((rsc != NULL) && (rsc->variant == pe_native)) {
+ if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
- case stopped_rsc:
- case started_rsc:
- case action_demoted:
- case action_promoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_demoted:
+ case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
@@ -1294,14 +1482,14 @@ get_complex_task(const pe_resource_t *rsc, const char *name)
*
* \return First action in list that matches criteria, or NULL if none
*/
-pe_action_t *
+pcmk_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
- const pe_node_t *on_node)
+ const pcmk_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
@@ -1324,7 +1512,7 @@ find_first_action(const GList *input, const char *uuid, const char *task,
}
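Editorial sketch (not part of the patch): matching by task alone is allowed as long as either a UUID or a task is given; "monitor" is an illustrative task name.

    pcmk_action_t *monitor = find_first_action(rsc->actions, NULL, "monitor",
                                               node);

    if (monitor != NULL) {
        crm_trace("Found %s for %s", monitor->uuid, rsc->id);
    }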
GList *
-find_actions(GList *input, const char *key, const pe_node_t *on_node)
+find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
@@ -1332,7 +1520,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
@@ -1358,7 +1546,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
}
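Editorial sketch (not part of the patch): find_actions() matches on the full operation key rather than the bare task, so a caller usually builds the key with pcmk__op_key(); the 10-second interval is illustrative.

    char *key = pcmk__op_key(rsc->id, "monitor", 10 * 1000 /* interval in ms */);
    GList *matches = find_actions(rsc->actions, key, node);

    for (GList *iter = matches; iter != NULL; iter = iter->next) {
        pcmk_action_t *op = iter->data;

        crm_trace("Found matching action %s", op->uuid);
    }
    g_list_free(matches);   // the matched action objects are not copied
    free(key);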
GList *
-find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
+find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *result = NULL;
@@ -1369,7 +1557,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
@@ -1397,7 +1585,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
* without a node will be assigned to node.
*/
GList *
-pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
@@ -1423,16 +1611,18 @@ pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
* \note It is the caller's responsibility to free() the result.
*/
char *
-pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
+pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
- case pe_action_runnable:
- case pe_action_migrate_runnable:
+ case pcmk_action_runnable:
change = "unrunnable";
break;
- case pe_action_optional:
+ case pcmk_action_migratable:
+ change = "unmigrateable";
+ break;
+ case pcmk_action_optional:
change = "required";
break;
default:
@@ -1446,7 +1636,8 @@ pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
action->task);
}
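Editorial sketch (not part of the patch): per the doxygen note above, the returned reason string is owned by the caller.

    char *why = pe__action2reason(action, pcmk_action_runnable);

    crm_trace("Blocked: %s", why);
    free(why);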
-void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
+void pe_action_set_reason(pcmk_action_t *action, const char *reason,
+ bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
@@ -1468,20 +1659,14 @@ void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrit
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
- * \param[in,out] data_set Cluster working set
- *
- * \return New action to clear resource history
*/
-pe_action_t *
-pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+void
+pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- char *key = NULL;
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- CRM_ASSERT(rsc && node);
- key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
- return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
- data_set);
+ custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->cluster);
}
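Editorial note with a one-line sketch (not part of the patch): the helper now creates the lrm_delete action against rsc->cluster itself and no longer returns it, so callers invoke it purely for its side effect.

    pe__clear_resource_history(rsc, node);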
#define sort_return(an_int, why) do { \
@@ -1646,19 +1831,19 @@ sort_op_by_callid(gconstpointer a, gconstpointer b)
*
* \return New action object corresponding to arguments
*/
-pe_action_t *
-pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
+pcmk_action_t *
+pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
- optional, TRUE, rsc->cluster);
- pe__set_action_flags(action, pe_action_pseudo);
+ optional, rsc->cluster);
+ pe__set_action_flags(action, pcmk_action_pseudo);
if (runnable) {
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
return action;
}
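Editorial sketch (not part of the patch): the convenience wrapper already marks the action as a pseudo-op, so a caller only decides whether it starts out optional and runnable; the "stopped" task name is illustrative.

    pcmk_action_t *stopped = pe__new_rsc_pseudo_action(rsc, "stopped",
                                                       true /* optional */,
                                                       true /* runnable */);

    crm_trace("Created pseudo-op %s", stopped->uuid);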
@@ -1673,7 +1858,7 @@ pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
* \note This is more efficient than calling add_hash_param().
*/
void
-pe__add_action_expected_result(pe_action_t *action, int expected_result)
+pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
{
char *name = NULL;