author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 06:53:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 06:53:20 +0000
commit     e5a812082ae033afb1eed82c0f2df3d0f6bdc93f
tree       a6716c9275b4b413f6c9194798b34b91affb3cc7  /lib/pengine
parent     Initial commit.
Adding upstream version 2.1.6.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib/pengine')
-rw-r--r--  lib/pengine/Makefile.am                                    81
-rw-r--r--  lib/pengine/bundle.c                                     2004
-rw-r--r--  lib/pengine/clone.c                                      1470
-rw-r--r--  lib/pengine/common.c                                      564
-rw-r--r--  lib/pengine/complex.c                                    1174
-rw-r--r--  lib/pengine/failcounts.c                                  403
-rw-r--r--  lib/pengine/group.c                                       521
-rw-r--r--  lib/pengine/native.c                                     1414
-rw-r--r--  lib/pengine/pe_actions.c                                 1686
-rw-r--r--  lib/pengine/pe_digest.c                                   592
-rw-r--r--  lib/pengine/pe_health.c                                   157
-rw-r--r--  lib/pengine/pe_notif.c                                    996
-rw-r--r--  lib/pengine/pe_output.c                                  3108
-rw-r--r--  lib/pengine/pe_status_private.h                           121
-rw-r--r--  lib/pengine/remote.c                                      270
-rw-r--r--  lib/pengine/rules.c                                      1316
-rw-r--r--  lib/pengine/rules_alerts.c                                299
-rw-r--r--  lib/pengine/status.c                                      483
-rw-r--r--  lib/pengine/tags.c                                        111
-rw-r--r--  lib/pengine/tests/Makefile.am                               1
-rw-r--r--  lib/pengine/tests/native/Makefile.am                       22
-rw-r--r--  lib/pengine/tests/native/native_find_rsc_test.c           677
-rw-r--r--  lib/pengine/tests/native/pe_base_name_eq_test.c           149
-rw-r--r--  lib/pengine/tests/rules/Makefile.am                        18
-rw-r--r--  lib/pengine/tests/rules/pe_cron_range_satisfied_test.c    165
-rw-r--r--  lib/pengine/tests/status/Makefile.am                       22
-rw-r--r--  lib/pengine/tests/status/pe_find_node_any_test.c           62
-rw-r--r--  lib/pengine/tests/status/pe_find_node_id_test.c            51
-rw-r--r--  lib/pengine/tests/status/pe_find_node_test.c               51
-rw-r--r--  lib/pengine/tests/status/pe_new_working_set_test.c         46
-rw-r--r--  lib/pengine/tests/status/set_working_set_defaults_test.c   46
-rw-r--r--  lib/pengine/tests/unpack/Makefile.am                       18
-rw-r--r--  lib/pengine/tests/unpack/pe_base_name_end_test.c           36
-rw-r--r--  lib/pengine/tests/utils/Makefile.am                        21
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_node_name_test.c           55
-rw-r--r--  lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c        50
-rw-r--r--  lib/pengine/unpack.c                                     4829
-rw-r--r--  lib/pengine/utils.c                                       938
-rw-r--r--  lib/pengine/variant.h                                      91
39 files changed, 24118 insertions, 0 deletions
diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
new file mode 100644
index 0000000..c2a8c90
--- /dev/null
+++ b/lib/pengine/Makefile.am
@@ -0,0 +1,81 @@
+#
+# Copyright 2004-2022 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+include $(top_srcdir)/mk/common.mk
+
+# Without "." here, check-recursive will run through the subdirectories first
+# and then run "make check" here. This will fail, because things in the
+# subdirectories need the check_LTLIBRARIES built first. Adding "." here
+# changes the order so the subdirectories are processed afterwards.
+SUBDIRS = . tests
+
+## libraries
+lib_LTLIBRARIES = libpe_rules.la libpe_status.la
+check_LTLIBRARIES = libpe_rules_test.la libpe_status_test.la
+
+## SOURCES
+noinst_HEADERS = variant.h pe_status_private.h
+
+libpe_rules_la_LDFLAGS = -version-info 30:0:4
+
+libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
+libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
+
+libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
+libpe_rules_la_SOURCES = rules.c rules_alerts.c common.c
+
+libpe_status_la_LDFLAGS = -version-info 34:0:6
+
+libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
+libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
+
+libpe_status_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
+# Use += rather than backslashed continuation lines for parsing by bumplibs
+libpe_status_la_SOURCES =
+libpe_status_la_SOURCES += bundle.c
+libpe_status_la_SOURCES += clone.c
+libpe_status_la_SOURCES += common.c
+libpe_status_la_SOURCES += complex.c
+libpe_status_la_SOURCES += failcounts.c
+libpe_status_la_SOURCES += group.c
+libpe_status_la_SOURCES += native.c
+libpe_status_la_SOURCES += pe_actions.c
+libpe_status_la_SOURCES += pe_health.c
+libpe_status_la_SOURCES += pe_digest.c
+libpe_status_la_SOURCES += pe_notif.c
+libpe_status_la_SOURCES += pe_output.c
+libpe_status_la_SOURCES += remote.c
+libpe_status_la_SOURCES += rules.c
+libpe_status_la_SOURCES += status.c
+libpe_status_la_SOURCES += tags.c
+libpe_status_la_SOURCES += unpack.c
+libpe_status_la_SOURCES += utils.c
+
+#
+# libpe_rules_test and libpe_status_test are only used with unit tests, so we can
+# mock system calls. See lib/common/mock.c for details.
+#
+
+include $(top_srcdir)/mk/tap.mk
+
+libpe_rules_test_la_SOURCES = $(libpe_rules_la_SOURCES)
+libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
+libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) -DPCMK__UNIT_TESTING \
+ -fno-builtin -fno-inline
+libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
+
+libpe_status_test_la_SOURCES = $(libpe_status_la_SOURCES)
+libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
+libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) -DPCMK__UNIT_TESTING \
+ -fno-builtin -fno-inline
+libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
+
+clean-generic:
+ rm -f *.log *.debug *~
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
new file mode 100644
index 0000000..ff1b365
--- /dev/null
+++ b/lib/pengine/bundle.c
@@ -0,0 +1,2004 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <ctype.h>
+#include <stdint.h>
+
+#include <crm/pengine/rules.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
+#include <crm/msg_xml.h>
+#include <crm/common/output.h>
+#include <crm/common/xml_internal.h>
+#include <pe_status_private.h>
+
+#define PE__VARIANT_BUNDLE 1
+#include "./variant.h"
+
+/*!
+ * \internal
+ * \brief Get maximum number of bundle replicas allowed to run
+ *
+ * \param[in] rsc Bundle or bundled resource to check
+ *
+ * \return Maximum replicas for bundle corresponding to \p rsc
+ */
+int
+pe__bundle_max(const pe_resource_t *rsc)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
+ return bundle_data->nreplicas;
+}
+
+/*!
+ * \internal
+ * \brief Get maximum number of bundle replicas allowed to run on one node
+ *
+ * \param[in] rsc Bundle or bundled resource to check
+ *
+ * \return Maximum replicas per node for bundle corresponding to \p rsc
+ */
+int
+pe__bundle_max_per_node(const pe_resource_t *rsc)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
+ return bundle_data->nreplicas_per_host;
+}
+
+static char *
+next_ip(const char *last_ip)
+{
+ unsigned int oct1 = 0;
+ unsigned int oct2 = 0;
+ unsigned int oct3 = 0;
+ unsigned int oct4 = 0;
+ int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
+
+ if (rc != 4) {
+        /* @TODO check for IPv6 */
+ return NULL;
+
+ } else if (oct3 > 253) {
+ return NULL;
+
+ } else if (oct4 > 253) {
+ ++oct3;
+ oct4 = 1;
+
+ } else {
+ ++oct4;
+ }
+
+ return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
+}
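+
+/* Worked examples for next_ip() above (addresses are illustrative):
+ *   next_ip("192.168.122.131") -> "192.168.122.132"
+ *   next_ip("192.168.122.254") -> "192.168.123.1"   (fourth octet wrapped)
+ *   next_ip("192.168.254.254") -> NULL              (third octet exhausted)
+ * IPv6 ranges are not handled yet (see the @TODO above).
+ */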
+
+static void
+allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
+ GString *buffer)
+{
+ if(data->ip_range_start == NULL) {
+ return;
+
+ } else if(data->ip_last) {
+ replica->ipaddr = next_ip(data->ip_last);
+
+ } else {
+ replica->ipaddr = strdup(data->ip_range_start);
+ }
+
+ data->ip_last = replica->ipaddr;
+ switch (data->agent_type) {
+ case PE__CONTAINER_AGENT_DOCKER:
+ case PE__CONTAINER_AGENT_PODMAN:
+ if (data->add_host) {
+ g_string_append_printf(buffer, " --add-host=%s-%d:%s",
+ data->prefix, replica->offset,
+ replica->ipaddr);
+ } else {
+ g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
+ replica->ipaddr, data->prefix,
+ replica->offset);
+ }
+ break;
+
+ case PE__CONTAINER_AGENT_RKT:
+ g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
+ replica->ipaddr, data->prefix,
+ replica->offset);
+ break;
+
+ default: // PE__CONTAINER_AGENT_UNKNOWN
+ break;
+ }
+}
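+
+/* Example of what allocate_ip() appends (values are illustrative): with
+ * prefix "httpd-bundle", offset 0, and IP 192.168.122.131, Docker/Podman
+ * get " --add-host=httpd-bundle-0:192.168.122.131" when add-host is true,
+ * and rkt gets " --hosts-entry=192.168.122.131=httpd-bundle-0".
+ */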
+
+static xmlNode *
+create_resource(const char *name, const char *provider, const char *kind)
+{
+ xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
+
+ crm_xml_add(rsc, XML_ATTR_ID, name);
+ crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
+ crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
+ crm_xml_add(rsc, XML_ATTR_TYPE, kind);
+
+ return rsc;
+}
+
+/*!
+ * \internal
+ * \brief Check whether cluster can manage resource inside container
+ *
+ * \param[in,out] data Container variant data
+ *
+ * \return TRUE if networking configuration is acceptable, FALSE otherwise
+ *
+ * \note The resource is manageable if an IP range or control port has been
+ * specified. If a control port is used without an IP range, replicas per
+ * host must be 1.
+ */
+static bool
+valid_network(pe__bundle_variant_data_t *data)
+{
+ if(data->ip_range_start) {
+ return TRUE;
+ }
+ if(data->control_port) {
+ if(data->nreplicas_per_host > 1) {
+ pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
+ data->nreplicas_per_host = 1;
+ // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
+ }
+ return TRUE;
+ }
+ return FALSE;
+}
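+
+/* For example, a bundle with neither ip-range-start nor control-port is
+ * rejected, while one with only control-port is accepted but forced to
+ * replicas-per-host=1: without per-replica IPs, multiple replicas on one
+ * host would all have to listen on the same port.
+ */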
+
+static int
+create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+ pe__bundle_replica_t *replica)
+{
+ if(data->ip_range_start) {
+ char *id = NULL;
+ xmlNode *xml_ip = NULL;
+ xmlNode *xml_obj = NULL;
+
+ id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
+ crm_xml_sanitize_id(id);
+ xml_ip = create_resource(id, "heartbeat", "IPaddr2");
+ free(id);
+
+ xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
+ crm_xml_set_id(xml_obj, "%s-attributes-%d",
+ data->prefix, replica->offset);
+
+ crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
+ if(data->host_network) {
+ crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
+ }
+
+ if(data->host_netmask) {
+ crm_create_nvpair_xml(xml_obj, NULL,
+ "cidr_netmask", data->host_netmask);
+
+ } else {
+ crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
+ }
+
+ xml_obj = create_xml_node(xml_ip, "operations");
+ crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
+
+ // TODO: Other ops? Timeouts and intervals from underlying resource?
+
+ if (pe__unpack_resource(xml_ip, &replica->ip, parent,
+ parent->cluster) != pcmk_rc_ok) {
+ return pcmk_rc_unpack_error;
+ }
+
+ parent->children = g_list_append(parent->children, replica->ip);
+ }
+ return pcmk_rc_ok;
+}
+
+static const char*
+container_agent_str(enum pe__container_agent t)
+{
+ switch (t) {
+ case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
+ case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S;
+ case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
+ default: // PE__CONTAINER_AGENT_UNKNOWN
+ break;
+ }
+ return PE__CONTAINER_AGENT_UNKNOWN_S;
+}
+
+static int
+create_container_resource(pe_resource_t *parent,
+ const pe__bundle_variant_data_t *data,
+ pe__bundle_replica_t *replica)
+{
+ char *id = NULL;
+ xmlNode *xml_container = NULL;
+ xmlNode *xml_obj = NULL;
+
+ // Agent-specific
+ const char *hostname_opt = NULL;
+ const char *env_opt = NULL;
+ const char *agent_str = NULL;
+ int volid = 0; // rkt-only
+
+ GString *buffer = NULL;
+ GString *dbuffer = NULL;
+
+ // Where syntax differences are drop-in replacements, set them now
+ switch (data->agent_type) {
+ case PE__CONTAINER_AGENT_DOCKER:
+ case PE__CONTAINER_AGENT_PODMAN:
+ hostname_opt = "-h ";
+ env_opt = "-e ";
+ break;
+ case PE__CONTAINER_AGENT_RKT:
+ hostname_opt = "--hostname=";
+ env_opt = "--environment=";
+ break;
+ default: // PE__CONTAINER_AGENT_UNKNOWN
+ return pcmk_rc_unpack_error;
+ }
+ agent_str = container_agent_str(data->agent_type);
+
+ buffer = g_string_sized_new(4096);
+
+ id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str,
+ replica->offset);
+ crm_xml_sanitize_id(id);
+ xml_container = create_resource(id, "heartbeat", agent_str);
+ free(id);
+
+ xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
+ crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset);
+
+ crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
+ crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
+ crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
+ crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
+
+ if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) {
+ g_string_append(buffer, " --restart=no");
+ }
+
+    /* Set a container hostname only if we have an IP to map it to. The user
+     * can set -h or --uts=host themselves if they want a nicer name for logs,
+     * but this keeps applications happy that need their hostname to match
+     * the IP they bind to.
+     */
+ if (data->ip_range_start != NULL) {
+ g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix,
+ replica->offset);
+ }
+ pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL);
+
+ if (data->container_network != NULL) {
+ pcmk__g_strcat(buffer, " --net=", data->container_network, NULL);
+ }
+
+ if (data->control_port != NULL) {
+ pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
+ data->control_port, NULL);
+ } else {
+ g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
+ DEFAULT_REMOTE_PORT);
+ }
+
+ for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
+ pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data;
+ char *source = NULL;
+
+ if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
+ source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix,
+ replica->offset);
+ pcmk__add_separated_word(&dbuffer, 1024, source, ",");
+ }
+
+ switch (data->agent_type) {
+ case PE__CONTAINER_AGENT_DOCKER:
+ case PE__CONTAINER_AGENT_PODMAN:
+ pcmk__g_strcat(buffer,
+ " -v ", pcmk__s(source, mount->source),
+ ":", mount->target, NULL);
+
+ if (mount->options != NULL) {
+ pcmk__g_strcat(buffer, ":", mount->options, NULL);
+ }
+ break;
+ case PE__CONTAINER_AGENT_RKT:
+ g_string_append_printf(buffer,
+ " --volume vol%d,kind=host,"
+ "source=%s%s%s "
+ "--mount volume=vol%d,target=%s",
+ volid, pcmk__s(source, mount->source),
+ (mount->options != NULL)? "," : "",
+ pcmk__s(mount->options, ""),
+ volid, mount->target);
+ volid++;
+ break;
+ default:
+ break;
+ }
+ free(source);
+ }
+
+ for (GList *iter = data->ports; iter != NULL; iter = iter->next) {
+ pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data;
+
+ switch (data->agent_type) {
+ case PE__CONTAINER_AGENT_DOCKER:
+ case PE__CONTAINER_AGENT_PODMAN:
+ if (replica->ipaddr != NULL) {
+ pcmk__g_strcat(buffer,
+ " -p ", replica->ipaddr, ":", port->source,
+ ":", port->target, NULL);
+
+ } else if (!pcmk__str_eq(data->container_network, "host",
+ pcmk__str_none)) {
+ // No need to do port mapping if net == host
+ pcmk__g_strcat(buffer,
+ " -p ", port->source, ":", port->target,
+ NULL);
+ }
+ break;
+ case PE__CONTAINER_AGENT_RKT:
+ if (replica->ipaddr != NULL) {
+ pcmk__g_strcat(buffer,
+ " --port=", port->target,
+ ":", replica->ipaddr, ":", port->source,
+ NULL);
+ } else {
+ pcmk__g_strcat(buffer,
+ " --port=", port->target, ":", port->source,
+ NULL);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because
+ * it would cause restarts during rolling upgrades.
+ *
+ * In a previous version of the container resource creation logic, if
+ * data->launcher_options is not NULL, we append
+ * (" %s", data->launcher_options) even if data->launcher_options is an
+ * empty string. Likewise for data->container_host_options. Using
+ *
+ * pcmk__add_word(buffer, 0, data->launcher_options)
+ *
+ * removes that extra trailing space, causing a resource definition change.
+ */
+ if (data->launcher_options != NULL) {
+ pcmk__g_strcat(buffer, " ", data->launcher_options, NULL);
+ }
+
+ if (data->container_host_options != NULL) {
+ pcmk__g_strcat(buffer, " ", data->container_host_options, NULL);
+ }
+
+ crm_create_nvpair_xml(xml_obj, NULL, "run_opts",
+ (const char *) buffer->str);
+ g_string_free(buffer, TRUE);
+
+ crm_create_nvpair_xml(xml_obj, NULL, "mount_points",
+ (dbuffer != NULL)? (const char *) dbuffer->str : "");
+ if (dbuffer != NULL) {
+ g_string_free(dbuffer, TRUE);
+ }
+
+ if (replica->child != NULL) {
+ if (data->container_command != NULL) {
+ crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
+ data->container_command);
+ } else {
+ crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
+ SBIN_DIR "/pacemaker-remoted");
+ }
+
+ /* TODO: Allow users to specify their own?
+ *
+ * We just want to know if the container is alive; we'll monitor the
+ * child independently.
+ */
+ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
+#if 0
+ /* @TODO Consider supporting the use case where we can start and stop
+ * resources, but not proxy local commands (such as setting node
+ * attributes), by running the local executor in stand-alone mode.
+ * However, this would probably be better done via ACLs as with other
+ * Pacemaker Remote nodes.
+ */
+ } else if ((child != NULL) && data->untrusted) {
+ crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
+ CRM_DAEMON_DIR "/pacemaker-execd");
+ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
+ CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
+#endif
+ } else {
+ if (data->container_command != NULL) {
+ crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
+ data->container_command);
+ }
+
+ /* TODO: Allow users to specify their own?
+ *
+ * We don't know what's in the container, so we just want to know if it
+ * is alive.
+ */
+ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
+ }
+
+ xml_obj = create_xml_node(xml_container, "operations");
+ crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
+
+ // TODO: Other ops? Timeouts and intervals from underlying resource?
+ if (pe__unpack_resource(xml_container, &replica->container, parent,
+ parent->cluster) != pcmk_rc_ok) {
+ return pcmk_rc_unpack_error;
+ }
+ pe__set_resource_flags(replica->container, pe_rsc_replica_container);
+ parent->children = g_list_append(parent->children, replica->container);
+
+ return pcmk_rc_ok;
+}
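+
+/* Sketch of the implicit container resource created above (IDs and values
+ * are illustrative):
+ *
+ *   <primitive id="httpd-bundle-docker-0" class="ocf" provider="heartbeat"
+ *              type="docker">
+ *     <instance_attributes id="httpd-bundle-attributes-0">
+ *       <nvpair ... name="image" value="pcmk:http"/>
+ *       <nvpair ... name="run_opts" value="..."/>
+ *     </instance_attributes>
+ *     <operations>
+ *       <op ... name="monitor" interval="60s"/>
+ *     </operations>
+ *   </primitive>
+ */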
+
+/*!
+ * \brief Ban a node from a resource's (and its children's) allowed nodes list
+ *
+ * \param[in,out] rsc Resource to modify
+ * \param[in] uname Name of node to ban
+ */
+static void
+disallow_node(pe_resource_t *rsc, const char *uname)
+{
+ gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
+
+ if (match) {
+ ((pe_node_t *) match)->weight = -INFINITY;
+ ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
+ }
+ if (rsc->children) {
+ g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
+ }
+}
+
+static int
+create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+ pe__bundle_replica_t *replica)
+{
+ if (replica->child && valid_network(data)) {
+ GHashTableIter gIter;
+ pe_node_t *node = NULL;
+ xmlNode *xml_remote = NULL;
+ char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
+ char *port_s = NULL;
+ const char *uname = NULL;
+ const char *connect_name = NULL;
+
+ if (pe_find_resource(parent->cluster->resources, id) != NULL) {
+ free(id);
+ // The biggest hammer we have
+ id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
+ replica->child->id, replica->offset);
+ //@TODO return error instead of asserting?
+ CRM_ASSERT(pe_find_resource(parent->cluster->resources,
+ id) == NULL);
+ }
+
+ /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
+ * connection does not have its own IP is a magic string that we use to
+ * support nested remotes (i.e. a bundle running on a remote node).
+ */
+ connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
+
+ if (data->control_port == NULL) {
+ port_s = pcmk__itoa(DEFAULT_REMOTE_PORT);
+ }
+
+ /* This sets replica->container as replica->remote's container, which is
+ * similar to what happens with guest nodes. This is how the scheduler
+ * knows that the bundle node is fenced by recovering the container, and
+ * that remote should be ordered relative to the container.
+ */
+ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
+ NULL, NULL, NULL,
+ connect_name, (data->control_port?
+ data->control_port : port_s));
+ free(port_s);
+
+ /* Abandon our created ID, and pull the copy from the XML, because we
+ * need something that will get freed during data set cleanup to use as
+ * the node ID and uname.
+ */
+ free(id);
+ id = NULL;
+ uname = ID(xml_remote);
+
+ /* Ensure a node has been created for the guest (it may have already
+ * been, if it has a permanent node attribute), and ensure its weight is
+ * -INFINITY so no other resources can run on it.
+ */
+ node = pe_find_node(parent->cluster->nodes, uname);
+ if (node == NULL) {
+ node = pe_create_node(uname, uname, "remote", "-INFINITY",
+ parent->cluster);
+ } else {
+ node->weight = -INFINITY;
+ }
+ node->rsc_discover_mode = pe_discover_never;
+
+ /* unpack_remote_nodes() ensures that each remote node and guest node
+ * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
+ * Unfortunately, a bundle has to be mostly unpacked before it's obvious
+ * what nodes will be needed, so we do it just above.
+ *
+ * Worse, that means that the node may have been utilized while
+ * unpacking other resources, without our weight correction. The most
+ * likely place for this to happen is when pe__unpack_resource() calls
+ * resource_location() to set a default score in symmetric clusters.
+ * This adds a node *copy* to each resource's allowed nodes, and these
+ * copies will have the wrong weight.
+ *
+ * As a hacky workaround, fix those copies here.
+ *
+ * @TODO Possible alternative: ensure bundles are unpacked before other
+ * resources, so the weight is correct before any copies are made.
+ */
+ g_list_foreach(parent->cluster->resources, (GFunc) disallow_node,
+ (gpointer) uname);
+
+ replica->node = pe__copy_node(node);
+ replica->node->weight = 500;
+ replica->node->rsc_discover_mode = pe_discover_exclusive;
+
+ /* Ensure the node shows up as allowed and with the correct discovery set */
+ if (replica->child->allowed_nodes != NULL) {
+ g_hash_table_destroy(replica->child->allowed_nodes);
+ }
+ replica->child->allowed_nodes = pcmk__strkey_table(NULL, free);
+ g_hash_table_insert(replica->child->allowed_nodes,
+ (gpointer) replica->node->details->id,
+ pe__copy_node(replica->node));
+
+ {
+ pe_node_t *copy = pe__copy_node(replica->node);
+ copy->weight = -INFINITY;
+ g_hash_table_insert(replica->child->parent->allowed_nodes,
+ (gpointer) replica->node->details->id, copy);
+ }
+ if (pe__unpack_resource(xml_remote, &replica->remote, parent,
+ parent->cluster) != pcmk_rc_ok) {
+ return pcmk_rc_unpack_error;
+ }
+
+ g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
+ while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
+ if (pe__is_guest_or_remote_node(node)) {
+                /* Remote resources can only run on 'normal' cluster nodes */
+ node->weight = -INFINITY;
+ }
+ }
+
+ replica->node->details->remote_rsc = replica->remote;
+
+ // Ensure pe__is_guest_node() functions correctly immediately
+ replica->remote->container = replica->container;
+
+ /* A bundle's #kind is closer to "container" (guest node) than the
+ * "remote" set by pe_create_node().
+ */
+ g_hash_table_insert(replica->node->details->attrs,
+ strdup(CRM_ATTR_KIND), strdup("container"));
+
+ /* One effect of this is that setup_container() will add
+ * replica->remote to replica->container's fillers, which will make
+ * pe__resource_contains_guest_node() true for replica->container.
+ *
+ * replica->child does NOT get added to replica->container's fillers.
+ * The only noticeable effect if it did would be for its fail count to
+ * be taken into account when checking replica->container's migration
+ * threshold.
+ */
+ parent->children = g_list_append(parent->children, replica->remote);
+ }
+ return pcmk_rc_ok;
+}
+
+static int
+create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+ pe__bundle_replica_t *replica)
+{
+ int rc = pcmk_rc_ok;
+
+ rc = create_container_resource(parent, data, replica);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ rc = create_ip_resource(parent, data, replica);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ rc = create_remote_resource(parent, data, replica);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ if ((replica->child != NULL) && (replica->ipaddr != NULL)) {
+ add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
+ }
+
+ if (replica->remote != NULL) {
+ /*
+ * Allow the remote connection resource to be allocated to a
+ * different node than the one on which the container is active.
+ *
+ * This makes it possible to have Pacemaker Remote nodes running
+ * containers with pacemaker-remoted inside in order to start
+ * services inside those containers.
+ */
+ pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
+ }
+ return rc;
+}
+
+static void
+mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
+ const char *target, const char *options, uint32_t flags)
+{
+ pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
+
+ CRM_ASSERT(mount != NULL);
+ mount->source = strdup(source);
+ mount->target = strdup(target);
+ pcmk__str_update(&mount->options, options);
+ mount->flags = flags;
+ bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
+}
+
+static void
+mount_free(pe__bundle_mount_t *mount)
+{
+ free(mount->source);
+ free(mount->target);
+ free(mount->options);
+ free(mount);
+}
+
+static void
+port_free(pe__bundle_port_t *port)
+{
+ free(port->source);
+ free(port->target);
+ free(port);
+}
+
+static pe__bundle_replica_t *
+replica_for_remote(pe_resource_t *remote)
+{
+ pe_resource_t *top = remote;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ if (top == NULL) {
+ return NULL;
+ }
+
+ while (top->parent != NULL) {
+ top = top->parent;
+ }
+
+ get_bundle_variant_data(bundle_data, top);
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ if (replica->remote == remote) {
+ return replica;
+ }
+ }
+ CRM_LOG_ASSERT(FALSE);
+ return NULL;
+}
+
+bool
+pe__bundle_needs_remote_name(pe_resource_t *rsc)
+{
+ const char *value;
+ GHashTable *params = NULL;
+
+ if (rsc == NULL) {
+ return false;
+ }
+
+ // Use NULL node since pcmk__bundle_expand() uses that to set value
+ params = pe_rsc_params(rsc, NULL, rsc->cluster);
+ value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR);
+
+ return pcmk__str_eq(value, "#uname", pcmk__str_casei)
+ && xml_contains_remote_node(rsc->xml);
+}
+
+const char *
+pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
+ xmlNode *xml, const char *field)
+{
+ // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
+
+ pe_node_t *node = NULL;
+ pe__bundle_replica_t *replica = NULL;
+
+ if (!pe__bundle_needs_remote_name(rsc)) {
+ return NULL;
+ }
+
+ replica = replica_for_remote(rsc);
+ if (replica == NULL) {
+ return NULL;
+ }
+
+ node = replica->container->allocated_to;
+ if (node == NULL) {
+ /* If it won't be running anywhere after the
+ * transition, go with where it's running now.
+ */
+ node = pe__current_node(replica->container);
+ }
+
+ if(node == NULL) {
+ crm_trace("Cannot determine address for bundle connection %s", rsc->id);
+ return NULL;
+ }
+
+ crm_trace("Setting address for bundle connection %s to bundle host %s",
+ rsc->id, pe__node_name(node));
+ if(xml != NULL && field != NULL) {
+ crm_xml_add(xml, field, node->details->uname);
+ }
+
+ return node->details->uname;
+}
+
+#define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \
+ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
+ "Bundle mount", ID(mount_xml), flags, \
+ (flags_to_set), #flags_to_set); \
+ } while (0)
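+
+// Example use (see pe__unpack_bundle() below):
+//     pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir);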
+
+gboolean
+pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
+{
+ const char *value = NULL;
+ xmlNode *xml_obj = NULL;
+ xmlNode *xml_resource = NULL;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ bool need_log_mount = TRUE;
+
+ CRM_ASSERT(rsc != NULL);
+ pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
+
+ bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
+ rsc->variant_opaque = bundle_data;
+ bundle_data->prefix = strdup(rsc->id);
+
+ xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
+ if (xml_obj != NULL) {
+ bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
+ } else {
+ xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
+ if (xml_obj != NULL) {
+ bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
+ } else {
+ xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
+ if (xml_obj != NULL) {
+ bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
+ } else {
+ return FALSE;
+ }
+ }
+ }
+
+ // Use 0 for default, minimum, and invalid promoted-max
+ value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
+ if (value == NULL) {
+ // @COMPAT deprecated since 2.0.0
+ value = crm_element_value(xml_obj, "masters");
+ }
+ pcmk__scan_min_int(value, &bundle_data->promoted_max, 0);
+
+    // Default replicas to promoted-max if it was specified, and to 1 otherwise
+ value = crm_element_value(xml_obj, "replicas");
+ if ((value == NULL) && (bundle_data->promoted_max > 0)) {
+ bundle_data->nreplicas = bundle_data->promoted_max;
+ } else {
+ pcmk__scan_min_int(value, &bundle_data->nreplicas, 1);
+ }
+
+    /*
+     * Communication between containers on the same host via the floating IPs
+     * only works if the container engine's daemon is started with:
+     * --userland-proxy=false --ip-masq=false
+     */
+ value = crm_element_value(xml_obj, "replicas-per-host");
+ pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
+ if (bundle_data->nreplicas_per_host == 1) {
+ pe__clear_resource_flags(rsc, pe_rsc_unique);
+ }
+
+ bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
+ bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
+ bundle_data->image = crm_element_value_copy(xml_obj, "image");
+ bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
+
+ xml_obj = first_named_child(rsc->xml, "network");
+ if(xml_obj) {
+
+ bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
+ bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
+ bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
+ bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
+ value = crm_element_value(xml_obj, "add-host");
+ if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
+ bundle_data->add_host = TRUE;
+ }
+
+ for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
+ xml_child = pcmk__xe_next(xml_child)) {
+
+ pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
+ port->source = crm_element_value_copy(xml_child, "port");
+
+ if(port->source == NULL) {
+ port->source = crm_element_value_copy(xml_child, "range");
+ } else {
+ port->target = crm_element_value_copy(xml_child, "internal-port");
+ }
+
+ if(port->source != NULL && strlen(port->source) > 0) {
+ if(port->target == NULL) {
+ port->target = strdup(port->source);
+ }
+ bundle_data->ports = g_list_append(bundle_data->ports, port);
+
+ } else {
+ pe_err("Invalid port directive %s", ID(xml_child));
+ port_free(port);
+ }
+ }
+ }
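+
+    /* Illustrative CIB input for the <network> block unpacked above:
+     *
+     *   <network ip-range-start="192.168.122.131" host-interface="eth0"
+     *            host-netmask="24" control-port="3121">
+     *     <port-mapping id="httpd-port" port="80" internal-port="8080"/>
+     *     <port-mapping id="httpd-range" range="9090-9091"/>
+     *   </network>
+     */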
+
+ xml_obj = first_named_child(rsc->xml, "storage");
+ for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
+ xml_child = pcmk__xe_next(xml_child)) {
+
+ const char *source = crm_element_value(xml_child, "source-dir");
+ const char *target = crm_element_value(xml_child, "target-dir");
+ const char *options = crm_element_value(xml_child, "options");
+ int flags = pe__bundle_mount_none;
+
+ if (source == NULL) {
+ source = crm_element_value(xml_child, "source-dir-root");
+ pe__set_bundle_mount_flags(xml_child, flags,
+ pe__bundle_mount_subdir);
+ }
+
+ if (source && target) {
+ mount_add(bundle_data, source, target, options, flags);
+ if (strcmp(target, "/var/log") == 0) {
+ need_log_mount = FALSE;
+ }
+ } else {
+ pe_err("Invalid mount directive %s", ID(xml_child));
+ }
+ }
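+
+    /* Illustrative CIB input for the <storage> block unpacked above:
+     *
+     *   <storage>
+     *     <storage-mapping id="httpd-root" source-dir="/srv/html"
+     *                      target-dir="/var/www/html" options="rw"/>
+     *     <storage-mapping id="httpd-logs"
+     *                      source-dir-root="/var/log/pacemaker/bundles"
+     *                      target-dir="/var/log" options="rw"/>
+     *   </storage>
+     *
+     * A source-dir-root mapping gets a per-replica subdirectory appended
+     * (pe__bundle_mount_subdir), and mapping anything to /var/log suppresses
+     * the default log mount (need_log_mount above).
+     */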
+
+ xml_obj = first_named_child(rsc->xml, "primitive");
+ if (xml_obj && valid_network(bundle_data)) {
+ char *value = NULL;
+ xmlNode *xml_set = NULL;
+
+ xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
+
+ /* @COMPAT We no longer use the <master> tag, but we need to keep it as
+ * part of the resource name, so that bundles don't restart in a rolling
+ * upgrade. (It also avoids needing to change regression tests.)
+ */
+ crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
+ (bundle_data->promoted_max? "master"
+ : (const char *)xml_resource->name));
+
+ xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
+ crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
+
+ crm_create_nvpair_xml(xml_set, NULL,
+ XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
+
+ value = pcmk__itoa(bundle_data->nreplicas);
+ crm_create_nvpair_xml(xml_set, NULL,
+ XML_RSC_ATTR_INCARNATION_MAX, value);
+ free(value);
+
+ value = pcmk__itoa(bundle_data->nreplicas_per_host);
+ crm_create_nvpair_xml(xml_set, NULL,
+ XML_RSC_ATTR_INCARNATION_NODEMAX, value);
+ free(value);
+
+ crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
+ pcmk__btoa(bundle_data->nreplicas_per_host > 1));
+
+ if (bundle_data->promoted_max) {
+ crm_create_nvpair_xml(xml_set, NULL,
+ XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
+
+ value = pcmk__itoa(bundle_data->promoted_max);
+ crm_create_nvpair_xml(xml_set, NULL,
+ XML_RSC_ATTR_PROMOTED_MAX, value);
+ free(value);
+ }
+
+ //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
+ add_node_copy(xml_resource, xml_obj);
+
+ } else if(xml_obj) {
+ pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
+ rsc->id, ID(xml_obj));
+ return FALSE;
+ }
+
+ if(xml_resource) {
+ int lpc = 0;
+ GList *childIter = NULL;
+ pe__bundle_port_t *port = NULL;
+ GString *buffer = NULL;
+
+ if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
+ data_set) != pcmk_rc_ok) {
+ return FALSE;
+ }
+
+ /* Currently, we always map the default authentication key location
+ * into the same location inside the container.
+ *
+ * Ideally, we would respect the host's PCMK_authkey_location, but:
+ * - it may be different on different nodes;
+     * - the actual connection will do extra checking (which we can't do
+     *   here on the DC) to make sure the key file exists and is readable;
+ * - tools such as crm_resource and crm_simulate may not have the same
+ * environment variables as the cluster, causing operation digests to
+ * differ
+ *
+ * Always using the default location inside the container is fine,
+ * because we control the pacemaker_remote environment, and it avoids
+ * having to pass another environment variable to the container.
+ *
+ * @TODO A better solution may be to have only pacemaker_remote use the
+ * environment variable, and have the cluster nodes use a new
+ * cluster option for key location. This would introduce the limitation
+ * of the location being the same on all cluster nodes, but that's
+ * reasonable.
+ */
+ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
+ DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
+
+ if (need_log_mount) {
+ mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
+ pe__bundle_mount_subdir);
+ }
+
+ port = calloc(1, sizeof(pe__bundle_port_t));
+ if(bundle_data->control_port) {
+ port->source = strdup(bundle_data->control_port);
+ } else {
+ /* If we wanted to respect PCMK_remote_port, we could use
+ * crm_default_remote_port() here and elsewhere in this file instead
+ * of DEFAULT_REMOTE_PORT.
+ *
+ * However, it gains nothing, since we control both the container
+ * environment and the connection resource parameters, and the user
+ * can use a different port if desired by setting control-port.
+ */
+ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT);
+ }
+ port->target = strdup(port->source);
+ bundle_data->ports = g_list_append(bundle_data->ports, port);
+
+ buffer = g_string_sized_new(1024);
+ for (childIter = bundle_data->child->children; childIter != NULL;
+ childIter = childIter->next) {
+
+ pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
+
+ replica->child = childIter->data;
+ replica->child->exclusive_discover = TRUE;
+ replica->offset = lpc++;
+
+ // Ensure the child's notify gets set based on the underlying primitive's value
+ if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
+ pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
+ }
+
+ allocate_ip(bundle_data, replica, buffer);
+ bundle_data->replicas = g_list_append(bundle_data->replicas,
+ replica);
+ bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
+ XML_RSC_ATTR_TARGET);
+ }
+ bundle_data->container_host_options = g_string_free(buffer, FALSE);
+
+ if (bundle_data->attribute_target) {
+ g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
+ strdup(bundle_data->attribute_target));
+ g_hash_table_replace(bundle_data->child->meta,
+ strdup(XML_RSC_ATTR_TARGET),
+ strdup(bundle_data->attribute_target));
+ }
+
+ } else {
+ // Just a naked container, no pacemaker-remote
+ GString *buffer = g_string_sized_new(1024);
+
+ for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
+ pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
+
+ replica->offset = lpc;
+ allocate_ip(bundle_data, replica, buffer);
+ bundle_data->replicas = g_list_append(bundle_data->replicas,
+ replica);
+ }
+ bundle_data->container_host_options = g_string_free(buffer, FALSE);
+ }
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) {
+ pe_err("Failed unpacking resource %s", rsc->id);
+ rsc->fns->free(rsc);
+ return FALSE;
+ }
+
+ /* Utilization needs special handling for bundles. It makes no sense for
+ * the inner primitive to have utilization, because it is tied
+ * one-to-one to the guest node created by the container resource -- and
+ * there's no way to set capacities for that guest node anyway.
+ *
+ * What the user really wants is to configure utilization for the
+ * container. However, the schema only allows utilization for
+ * primitives, and the container resource is implicit anyway, so the
+ * user can *only* configure utilization for the inner primitive. If
+ * they do, move the primitive's utilization values to the container.
+ *
+ * @TODO This means that bundles without an inner primitive can't have
+ * utilization. An alternative might be to allow utilization values in
+ * the top-level bundle XML in the schema, and copy those to each
+ * container.
+ */
+ if (replica->child != NULL) {
+ GHashTable *empty = replica->container->utilization;
+
+ replica->container->utilization = replica->child->utilization;
+ replica->child->utilization = empty;
+ }
+ }
+
+ if (bundle_data->child) {
+ rsc->children = g_list_append(rsc->children, bundle_data->child);
+ }
+ return TRUE;
+}
+
+static int
+replica_resource_active(pe_resource_t *rsc, gboolean all)
+{
+ if (rsc) {
+ gboolean child_active = rsc->fns->active(rsc, all);
+
+ if (child_active && !all) {
+ return TRUE;
+ } else if (!child_active && all) {
+ return FALSE;
+ }
+ }
+ return -1;
+}
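+
+/* In other words, replica_resource_active() is tri-state: it returns TRUE
+ * or FALSE when this resource alone decides the answer for
+ * pe__bundle_active() below, and -1 (resource is NULL, or active with
+ * all=TRUE, or inactive with all=FALSE) to mean "keep checking the
+ * remaining replica resources".
+ */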
+
+gboolean
+pe__bundle_active(pe_resource_t *rsc, gboolean all)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ GList *iter = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+ int rsc_active;
+
+ rsc_active = replica_resource_active(replica->ip, all);
+ if (rsc_active >= 0) {
+ return (gboolean) rsc_active;
+ }
+
+ rsc_active = replica_resource_active(replica->child, all);
+ if (rsc_active >= 0) {
+ return (gboolean) rsc_active;
+ }
+
+ rsc_active = replica_resource_active(replica->container, all);
+ if (rsc_active >= 0) {
+ return (gboolean) rsc_active;
+ }
+
+ rsc_active = replica_resource_active(replica->remote, all);
+ if (rsc_active >= 0) {
+ return (gboolean) rsc_active;
+ }
+ }
+
+ /* If "all" is TRUE, we've already checked that no resources were inactive,
+ * so return TRUE; if "all" is FALSE, we didn't find any active resources,
+ * so return FALSE.
+ */
+ return all;
+}
+
+/*!
+ * \internal
+ * \brief Find the bundle replica corresponding to a given node
+ *
+ * \param[in] bundle Top-level bundle resource
+ * \param[in] node Node to search for
+ *
+ * \return Bundle replica if found, NULL otherwise
+ */
+pe_resource_t *
+pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ CRM_ASSERT(bundle && node);
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ CRM_ASSERT(replica && replica->node);
+ if (replica->node->details == node->details) {
+ return replica->child;
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ if (rsc != NULL) {
+ if (options & pe_print_html) {
+ status_print("<li>");
+ }
+ rsc->fns->print(rsc, pre_text, options, print_data);
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ char *child_text = NULL;
+ CRM_CHECK(rsc != NULL, return);
+
+ if (pre_text == NULL) {
+ pre_text = "";
+ }
+ child_text = crm_strdup_printf("%s ", pre_text);
+
+ get_bundle_variant_data(bundle_data, rsc);
+
+ status_print("%s<bundle ", pre_text);
+ status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
+ status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
+ status_print("image=\"%s\" ", bundle_data->image);
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
+ status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print(">\n");
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ CRM_ASSERT(replica);
+ status_print("%s <replica " XML_ATTR_ID "=\"%d\">\n",
+ pre_text, replica->offset);
+ print_rsc_in_list(replica->ip, child_text, options, print_data);
+ print_rsc_in_list(replica->child, child_text, options, print_data);
+ print_rsc_in_list(replica->container, child_text, options, print_data);
+ print_rsc_in_list(replica->remote, child_text, options, print_data);
+ status_print("%s </replica>\n", pre_text);
+ }
+ status_print("%s</bundle>\n", pre_text);
+ free(child_text);
+}
+
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__bundle_xml(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ int rc = pcmk_rc_no_output;
+ gboolean printed_header = FALSE;
+ gboolean print_everything = TRUE;
+
+ const char *desc = NULL;
+
+ CRM_ASSERT(rsc != NULL);
+
+ get_bundle_variant_data(bundle_data, rsc);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+ char *id = NULL;
+ gboolean print_ip, print_child, print_ctnr, print_remote;
+
+ CRM_ASSERT(replica);
+
+ if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
+ continue;
+ }
+
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
+ continue;
+ }
+
+ if (!printed_header) {
+ printed_header = TRUE;
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ rc = pe__name_and_nvpairs_xml(out, true, "bundle", 8,
+ "id", rsc->id,
+ "type", container_agent_str(bundle_data->agent_type),
+ "image", bundle_data->image,
+ "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
+ "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "description", desc);
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
+ id = pcmk__itoa(replica->offset);
+ rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
+ free(id);
+ CRM_ASSERT(rc == pcmk_rc_ok);
+
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml), show_opts,
+ replica->ip, only_node, only_rsc);
+ }
+
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml), show_opts,
+ replica->child, only_node, only_rsc);
+ }
+
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml), show_opts,
+ replica->container, only_node, only_rsc);
+ }
+
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml), show_opts,
+ replica->remote, only_node, only_rsc);
+ }
+
+ pcmk__output_xml_pop_parent(out); // replica
+ }
+
+ if (printed_header) {
+ pcmk__output_xml_pop_parent(out); // bundle
+ }
+
+ return rc;
+}
+
+static void
+pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
+ pe_node_t *node, uint32_t show_opts)
+{
+ pe_resource_t *rsc = replica->child;
+
+ int offset = 0;
+ char buffer[LINE_MAX];
+
+ if(rsc == NULL) {
+ rsc = replica->container;
+ }
+
+ if (replica->remote) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->remote));
+ } else {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->container));
+ }
+ if (replica->ipaddr) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
+ replica->ipaddr);
+ }
+
+ pe__common_output_html(out, rsc, buffer, node, show_opts);
+}
+
+/*!
+ * \internal
+ * \brief Get a string describing a resource's unmanaged state or lack thereof
+ *
+ * \param[in] rsc Resource to describe
+ *
+ * \return A string indicating that a resource is in maintenance mode or
+ * otherwise unmanaged, or an empty string otherwise
+ */
+static const char *
+get_unmanaged_str(const pe_resource_t *rsc)
+{
+ if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ return " (maintenance)";
+ }
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ return " (unmanaged)";
+ }
+ return "";
+}
+
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__bundle_html(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const char *desc = NULL;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
+ CRM_ASSERT(rsc != NULL);
+
+ get_bundle_variant_data(bundle_data, rsc);
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+ gboolean print_ip, print_child, print_ctnr, print_remote;
+
+ CRM_ASSERT(replica);
+
+ if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
+ continue;
+ }
+
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
+ (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
+ /* The text output messages used below require pe_print_implicit to
+ * be set to do anything.
+ */
+ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
+ get_unmanaged_str(rsc));
+
+ if (pcmk__list_of_multiple(bundle_data->replicas)) {
+ out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset);
+ }
+
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml),
+ new_show_opts, replica->ip, only_node, only_rsc);
+ }
+
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml),
+ new_show_opts, replica->child, only_node, only_rsc);
+ }
+
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml),
+ new_show_opts, replica->container, only_node, only_rsc);
+ }
+
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml),
+ new_show_opts, replica->remote, only_node, only_rsc);
+ }
+
+ if (pcmk__list_of_multiple(bundle_data->replicas)) {
+ out->end_list(out);
+ }
+ } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
+ continue;
+ } else {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
+ get_unmanaged_str(rsc));
+
+ pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
+ show_opts);
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+static void
+pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
+ pe_node_t *node, uint32_t show_opts)
+{
+ const pe_resource_t *rsc = replica->child;
+
+ int offset = 0;
+ char buffer[LINE_MAX];
+
+ if(rsc == NULL) {
+ rsc = replica->container;
+ }
+
+ if (replica->remote) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->remote));
+ } else {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->container));
+ }
+ if (replica->ipaddr) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
+ replica->ipaddr);
+ }
+
+ pe__common_output_text(out, rsc, buffer, node, show_opts);
+}
+
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__bundle_text(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const char *desc = NULL;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
+    CRM_ASSERT(rsc != NULL);
+
+    get_bundle_variant_data(bundle_data, rsc);
+
+    desc = pe__resource_description(rsc, show_opts);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+ gboolean print_ip, print_child, print_ctnr, print_remote;
+
+ CRM_ASSERT(replica);
+
+ if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
+ continue;
+ }
+
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
+ (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
+ /* The text output messages used below require pe_print_implicit to
+ * be set to do anything.
+ */
+ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
+ get_unmanaged_str(rsc));
+
+ if (pcmk__list_of_multiple(bundle_data->replicas)) {
+ out->list_item(out, NULL, "Replica[%d]", replica->offset);
+ }
+
+ out->begin_list(out, NULL, NULL, NULL);
+
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml),
+ new_show_opts, replica->ip, only_node, only_rsc);
+ }
+
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml),
+ new_show_opts, replica->child, only_node, only_rsc);
+ }
+
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml),
+ new_show_opts, replica->container, only_node, only_rsc);
+ }
+
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml),
+ new_show_opts, replica->remote, only_node, only_rsc);
+ }
+
+ out->end_list(out);
+ } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
+ continue;
+ } else {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
+ get_unmanaged_str(rsc));
+
+ pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
+ show_opts);
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
+ long options, void *print_data)
+{
+ pe_node_t *node = NULL;
+ pe_resource_t *rsc = replica->child;
+
+ int offset = 0;
+ char buffer[LINE_MAX];
+
+ if(rsc == NULL) {
+ rsc = replica->container;
+ }
+
+ if (replica->remote) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->remote));
+ } else {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
+ rsc_printable_id(replica->container));
+ }
+ if (replica->ipaddr) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
+ replica->ipaddr);
+ }
+
+ node = pe__current_node(replica->container);
+ common_print(rsc, pre_text, buffer, node, options, print_data);
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ char *child_text = NULL;
+ CRM_CHECK(rsc != NULL, return);
+
+ if (options & pe_print_xml) {
+ bundle_print_xml(rsc, pre_text, options, print_data);
+ return;
+ }
+
+ get_bundle_variant_data(bundle_data, rsc);
+
+ if (pre_text == NULL) {
+ pre_text = " ";
+ }
+
+ status_print("%sContainer bundle%s: %s [%s]%s%s\n",
+ pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
+ rsc->id, bundle_data->image,
+ pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ if (options & pe_print_html) {
+ status_print("<br />\n<ul>\n");
+ }
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL;
+ gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ CRM_ASSERT(replica);
+ if (options & pe_print_html) {
+ status_print("<li>");
+ }
+
+ if (pcmk_is_set(options, pe_print_implicit)) {
+ child_text = crm_strdup_printf(" %s", pre_text);
+ if (pcmk__list_of_multiple(bundle_data->replicas)) {
+ status_print(" %sReplica[%d]\n", pre_text, replica->offset);
+ }
+ if (options & pe_print_html) {
+ status_print("<br />\n<ul>\n");
+ }
+ print_rsc_in_list(replica->ip, child_text, options, print_data);
+ print_rsc_in_list(replica->container, child_text, options, print_data);
+ print_rsc_in_list(replica->remote, child_text, options, print_data);
+ print_rsc_in_list(replica->child, child_text, options, print_data);
+ if (options & pe_print_html) {
+ status_print("</ul>\n");
+ }
+ } else {
+ child_text = crm_strdup_printf("%s ", pre_text);
+ print_bundle_replica(replica, child_text, options, print_data);
+ }
+ free(child_text);
+
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+ if (options & pe_print_html) {
+ status_print("</ul>\n");
+ }
+}
+
+static void
+free_bundle_replica(pe__bundle_replica_t *replica)
+{
+ if (replica == NULL) {
+ return;
+ }
+
+ if (replica->node) {
+ free(replica->node);
+ replica->node = NULL;
+ }
+
+ if (replica->ip) {
+ free_xml(replica->ip->xml);
+ replica->ip->xml = NULL;
+ replica->ip->fns->free(replica->ip);
+ replica->ip = NULL;
+ }
+ if (replica->container) {
+ free_xml(replica->container->xml);
+ replica->container->xml = NULL;
+ replica->container->fns->free(replica->container);
+ replica->container = NULL;
+ }
+ if (replica->remote) {
+ free_xml(replica->remote->xml);
+ replica->remote->xml = NULL;
+ replica->remote->fns->free(replica->remote);
+ replica->remote = NULL;
+ }
+ free(replica->ipaddr);
+ free(replica);
+}
+
+void
+pe__free_bundle(pe_resource_t *rsc)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ CRM_CHECK(rsc != NULL, return);
+
+ get_bundle_variant_data(bundle_data, rsc);
+ pe_rsc_trace(rsc, "Freeing %s", rsc->id);
+
+ free(bundle_data->prefix);
+ free(bundle_data->image);
+ free(bundle_data->control_port);
+ free(bundle_data->host_network);
+ free(bundle_data->host_netmask);
+ free(bundle_data->ip_range_start);
+ free(bundle_data->container_network);
+ free(bundle_data->launcher_options);
+ free(bundle_data->container_command);
+ g_free(bundle_data->container_host_options);
+
+ g_list_free_full(bundle_data->replicas,
+ (GDestroyNotify) free_bundle_replica);
+ g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
+ g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
+ g_list_free(rsc->children);
+
+ if(bundle_data->child) {
+ free_xml(bundle_data->child->xml);
+ bundle_data->child->xml = NULL;
+ bundle_data->child->fns->free(bundle_data->child);
+ }
+ common_free(rsc);
+}
+
+enum rsc_role_e
+pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
+{
+ enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
+ return container_role;
+}
+
+/*!
+ * \brief Get the number of configured replicas in a bundle
+ *
+ * \param[in] rsc Bundle resource
+ *
+ * \return Number of configured replicas, or 0 on error
+ */
+int
+pe_bundle_replicas(const pe_resource_t *rsc)
+{
+ if ((rsc == NULL) || (rsc->variant != pe_container)) {
+ return 0;
+ } else {
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ return bundle_data->nreplicas;
+ }
+}
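+
+/* A minimal usage sketch (illustrative only; "bundle_rsc" is a hypothetical
+ * resource obtained from an unpacked working set):
+ *
+ *     int n = pe_bundle_replicas(bundle_rsc);
+ *
+ *     if (n == 0) {
+ *         // bundle_rsc was NULL or not a bundle
+ *     }
+ */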
+
+void
+pe__count_bundle(pe_resource_t *rsc)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
+ pe__bundle_replica_t *replica = item->data;
+
+ if (replica->ip) {
+ replica->ip->fns->count(replica->ip);
+ }
+ if (replica->child) {
+ replica->child->fns->count(replica->child);
+ }
+ if (replica->container) {
+ replica->container->fns->count(replica->container);
+ }
+ if (replica->remote) {
+ replica->remote->fns->count(replica->remote);
+ }
+ }
+}
+
+gboolean
+pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+ gboolean check_parent)
+{
+ gboolean passes = FALSE;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
+ passes = TRUE;
+ } else {
+ get_bundle_variant_data(bundle_data, rsc);
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+
+ return !passes;
+}
+
+/*!
+ * \internal
+ * \brief Get a list of a bundle's containers
+ *
+ * \param[in] bundle Bundle resource
+ *
+ * \return Newly created list of \p bundle's containers
+ * \note It is the caller's responsibility to free the result with
+ * g_list_free().
+ */
+GList *
+pe__bundle_containers(const pe_resource_t *bundle)
+{
+ GList *containers = NULL;
+ const pe__bundle_variant_data_t *data = NULL;
+
+ get_bundle_variant_data(data, bundle);
+ for (GList *iter = data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+
+ containers = g_list_append(containers, replica->container);
+ }
+ return containers;
+}
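+
+/* Sketch of the ownership contract documented above: free the list itself,
+ * but not the container resources it points to ("bundle" is hypothetical):
+ *
+ *     GList *containers = pe__bundle_containers(bundle);
+ *
+ *     for (GList *iter = containers; iter != NULL; iter = iter->next) {
+ *         pe_resource_t *container = iter->data;
+ *         // ... inspect container ...
+ *     }
+ *     g_list_free(containers); // frees the list only, not its members
+ */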
+
+// Bundle implementation of resource_object_functions_t:active_node()
+pe_node_t *
+pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean)
+{
+ pe_node_t *active = NULL;
+ pe_node_t *node = NULL;
+ pe_resource_t *container = NULL;
+ GList *containers = NULL;
+ GList *iter = NULL;
+ GHashTable *nodes = NULL;
+ const pe__bundle_variant_data_t *data = NULL;
+
+ if (count_all != NULL) {
+ *count_all = 0;
+ }
+ if (count_clean != NULL) {
+ *count_clean = 0;
+ }
+ if (rsc == NULL) {
+ return NULL;
+ }
+
+ /* For the purposes of this method, we only care about where the bundle's
+ * containers are active, so build a list of active containers.
+ */
+ get_bundle_variant_data(data, rsc);
+ for (iter = data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+
+ if (replica->container->running_on != NULL) {
+ containers = g_list_append(containers, replica->container);
+ }
+ }
+ if (containers == NULL) {
+ return NULL;
+ }
+
+ /* If the bundle has only a single active container, just use that
+ * container's method. If live migration is ever supported for bundle
+ * containers, this will allow us to prefer the migration source when there
+ * is only one container and it is migrating. For now, this just lets us
+ * avoid creating the nodes table.
+ */
+ if (pcmk__list_of_1(containers)) {
+ container = containers->data;
+ node = container->fns->active_node(container, count_all, count_clean);
+ g_list_free(containers);
+ return node;
+ }
+
+ // Add all containers' active nodes to a hash table (for uniqueness)
+ nodes = g_hash_table_new(NULL, NULL);
+ for (iter = containers; iter != NULL; iter = iter->next) {
+ container = iter->data;
+
+ for (GList *node_iter = container->running_on; node_iter != NULL;
+ node_iter = node_iter->next) {
+ node = node_iter->data;
+
+ // If insert returns true, we haven't counted this node yet
+ if (g_hash_table_insert(nodes, (gpointer) node->details,
+ (gpointer) node)
+ && !pe__count_active_node(rsc, node, &active, count_all,
+ count_clean)) {
+ goto done;
+ }
+ }
+ }
+
+done:
+ g_list_free(containers);
+ g_hash_table_destroy(nodes);
+ return active;
+}
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
new file mode 100644
index 0000000..e411f98
--- /dev/null
+++ b/lib/pengine/clone.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdint.h>
+
+#include <crm/pengine/rules.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
+#include <pe_status_private.h>
+#include <crm/msg_xml.h>
+#include <crm/common/output.h>
+#include <crm/common/xml_internal.h>
+
+#ifdef PCMK__COMPAT_2_0
+#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s"
+#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
+#else
+#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S
+#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
+#endif
+
+typedef struct clone_variant_data_s {
+ int clone_max;
+ int clone_node_max;
+
+ int promoted_max;
+ int promoted_node_max;
+
+ int total_clones;
+
+ uint32_t flags; // Group of enum pe__clone_flags
+
+ notify_data_t *stop_notify;
+ notify_data_t *start_notify;
+ notify_data_t *demote_notify;
+ notify_data_t *promote_notify;
+
+ xmlNode *xml_obj_child;
+} clone_variant_data_t;
+
+#define get_clone_variant_data(data, rsc) \
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_clone)); \
+ data = (clone_variant_data_t *) rsc->variant_opaque;
+
+/*!
+ * \internal
+ * \brief Return the maximum number of clone instances allowed to be run
+ *
+ * \param[in] clone Clone or clone instance to check
+ *
+ * \return Maximum instances for \p clone
+ */
+int
+pe__clone_max(const pe_resource_t *clone)
+{
+ const clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
+ return clone_data->clone_max;
+}
+
+/*!
+ * \internal
+ * \brief Return the maximum number of clone instances allowed per node
+ *
+ * \param[in] clone Clone or clone instance to check
+ *
+ * \return Maximum allowed instances per node for \p clone
+ */
+int
+pe__clone_node_max(const pe_resource_t *clone)
+{
+ const clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
+ return clone_data->clone_node_max;
+}
+
+/*!
+ * \internal
+ * \brief Return the maximum number of clone instances allowed to be promoted
+ *
+ * \param[in] clone Promotable clone or clone instance to check
+ *
+ * \return Maximum promoted instances for \p clone
+ */
+int
+pe__clone_promoted_max(const pe_resource_t *clone)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
+ return clone_data->promoted_max;
+}
+
+/*!
+ * \internal
+ * \brief Return the maximum number of promoted instances allowed per node
+ *
+ * \param[in] clone Promotable clone or clone instance to check
+ *
+ * \return Maximum promoted instances per node for \p clone
+ */
+int
+pe__clone_promoted_node_max(const pe_resource_t *clone)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, pe__const_top_resource(clone, false));
+ return clone_data->promoted_node_max;
+}
+
+static GList *
+sorted_hash_table_values(GHashTable *table)
+{
+ GList *retval = NULL;
+ GHashTableIter iter;
+ gpointer key, value;
+
+ g_hash_table_iter_init(&iter, table);
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ if (!g_list_find_custom(retval, value, (GCompareFunc) strcmp)) {
+ retval = g_list_prepend(retval, (char *) value);
+ }
+ }
+
+ retval = g_list_sort(retval, (GCompareFunc) strcmp);
+ return retval;
+}
+
+static GList *
+nodes_with_status(GHashTable *table, const char *status)
+{
+ GList *retval = NULL;
+ GHashTableIter iter;
+ gpointer key, value;
+
+ g_hash_table_iter_init(&iter, table);
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ if (!strcmp((char *) value, status)) {
+ retval = g_list_prepend(retval, key);
+ }
+ }
+
+ retval = g_list_sort(retval, (GCompareFunc) pcmk__numeric_strcasecmp);
+ return retval;
+}
+
+static GString *
+node_list_to_str(const GList *list)
+{
+ GString *retval = NULL;
+
+ for (const GList *iter = list; iter != NULL; iter = iter->next) {
+ pcmk__add_word(&retval, 1024, (const char *) iter->data);
+ }
+
+ return retval;
+}
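+
+/* Together, these helpers produce the "status: [ node list ]" lines emitted
+ * by pe__clone_default() below: sorted_hash_table_values() yields the
+ * distinct status strings, nodes_with_status() collects the nodes reported
+ * with each status, and node_list_to_str() joins them for display.
+ */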
+
+static void
+clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+ clone_variant_data_t *clone_data, const char *desc)
+{
+ GString *attrs = NULL;
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ pcmk__add_separated_word(&attrs, 64, "promotable", ", ");
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ pcmk__add_separated_word(&attrs, 64, "unique", ", ");
+ }
+
+ if (pe__resource_is_disabled(rsc)) {
+ pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
+
+ } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
+ }
+
+ if (attrs != NULL) {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)%s%s%s",
+ rsc->id, ID(clone_data->xml_obj_child),
+ (const char *) attrs->str, desc ? " (" : "",
+ desc ? desc : "", desc ? ")" : "");
+ g_string_free(attrs, TRUE);
+ } else {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]%s%s%s",
+ rsc->id, ID(clone_data->xml_obj_child),
+ desc ? " (" : "", desc ? desc : "",
+ desc ? ")" : "");
+ }
+}
+
+void
+pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
+ pe_working_set_t *data_set)
+{
+ if (pe_rsc_is_clone(rsc)) {
+ clone_variant_data_t *clone_data = rsc->variant_opaque;
+
+ pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
+ "such as %s can be used only as anonymous clones",
+ rsc->id, standard, rid);
+
+ clone_data->clone_node_max = 1;
+ clone_data->clone_max = QB_MIN(clone_data->clone_max,
+ g_list_length(data_set->nodes));
+ }
+}
+
+pe_resource_t *
+find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
+{
+ char *child_id = NULL;
+ pe_resource_t *child = NULL;
+ const char *child_base = NULL;
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+
+ child_base = ID(clone_data->xml_obj_child);
+ child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
+ child = pe_find_resource(rsc->children, child_id);
+
+ free(child_id);
+ return child;
+}
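+
+/* Instance IDs follow the "<child-id>:<instance-number>" convention used
+ * above, so looking up the first instance of a clone whose child is "myrsc"
+ * is (illustrative sketch):
+ *
+ *     pe_resource_t *inst0 = find_clone_instance(clone, "0");
+ *     // searches clone->children for a resource with ID "myrsc:0"
+ */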
+
+pe_resource_t *
+pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
+{
+ gboolean as_orphan = FALSE;
+ char *inc_num = NULL;
+ char *inc_max = NULL;
+ pe_resource_t *child_rsc = NULL;
+ xmlNode *child_copy = NULL;
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+
+    CRM_CHECK(clone_data->xml_obj_child != NULL, return NULL);
+
+ if (clone_data->total_clones >= clone_data->clone_max) {
+ // If we've already used all available instances, this is an orphan
+ as_orphan = TRUE;
+ }
+
+ // Allocate instance numbers in numerical order (starting at 0)
+ inc_num = pcmk__itoa(clone_data->total_clones);
+ inc_max = pcmk__itoa(clone_data->clone_max);
+
+ child_copy = copy_xml(clone_data->xml_obj_child);
+
+ crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
+
+ if (pe__unpack_resource(child_copy, &child_rsc, rsc,
+ data_set) != pcmk_rc_ok) {
+ goto bail;
+ }
+/* child_rsc->globally_unique = rsc->globally_unique; */
+
+ CRM_ASSERT(child_rsc);
+ clone_data->total_clones += 1;
+ pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
+ rsc->children = g_list_append(rsc->children, child_rsc);
+ if (as_orphan) {
+ pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
+ }
+
+ add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
+ pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
+
+ bail:
+ free(inc_num);
+ free(inc_max);
+
+ return child_rsc;
+}
+
+gboolean
+clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+{
+ int lpc = 0;
+ xmlNode *a_child = NULL;
+ xmlNode *xml_obj = rsc->xml;
+ clone_variant_data_t *clone_data = NULL;
+
+ const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
+ const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
+
+ pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
+
+ clone_data = calloc(1, sizeof(clone_variant_data_t));
+ rsc->variant_opaque = clone_data;
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ const char *promoted_max = NULL;
+ const char *promoted_node_max = NULL;
+
+ promoted_max = g_hash_table_lookup(rsc->meta,
+ XML_RSC_ATTR_PROMOTED_MAX);
+ if (promoted_max == NULL) {
+ // @COMPAT deprecated since 2.0.0
+ promoted_max = g_hash_table_lookup(rsc->meta,
+ PCMK_XA_PROMOTED_MAX_LEGACY);
+ }
+
+ promoted_node_max = g_hash_table_lookup(rsc->meta,
+ XML_RSC_ATTR_PROMOTED_NODEMAX);
+ if (promoted_node_max == NULL) {
+ // @COMPAT deprecated since 2.0.0
+ promoted_node_max =
+ g_hash_table_lookup(rsc->meta,
+ PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
+ }
+
+ // Use 1 as default but 0 for minimum and invalid
+ if (promoted_max == NULL) {
+ clone_data->promoted_max = 1;
+ } else {
+ pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
+ }
+
+ // Use 1 as default but 0 for minimum and invalid
+ if (promoted_node_max == NULL) {
+ clone_data->promoted_node_max = 1;
+ } else {
+ pcmk__scan_min_int(promoted_node_max,
+ &(clone_data->promoted_node_max), 0);
+ }
+ }
+
+ // Implied by calloc()
+ /* clone_data->xml_obj_child = NULL; */
+
+ // Use 1 as default but 0 for minimum and invalid
+ if (max_clones_node == NULL) {
+ clone_data->clone_node_max = 1;
+ } else {
+ pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
+ }
+
+ /* Use number of nodes (but always at least 1, which is handy for crm_verify
+ * for a CIB without nodes) as default, but 0 for minimum and invalid
+ */
+ if (max_clones == NULL) {
+ clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
+ } else {
+ pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
+ }
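+
+    /* For reference (behavioral sketch, not a full specification):
+     * pcmk__scan_min_int(text, &result, minimum) parses text as an integer
+     * and stores at least minimum in result; unparsable values also yield
+     * minimum. With the calls above, an unparsable or negative clone-max
+     * therefore ends up as 0.
+     */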
+
+ if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) {
+ clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
+ "Clone", rsc->id,
+ clone_data->flags,
+ pe__clone_ordered,
+ "pe__clone_ordered");
+ }
+
+    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+        && (clone_data->clone_node_max > 1)) {
+        pcmk__config_err("Ignoring " XML_RSC_ATTR_INCARNATION_NODEMAX
+                         " for %s because anonymous clones support only one "
+                         "instance per node", rsc->id);
+ clone_data->clone_node_max = 1;
+ }
+
+ pe_rsc_trace(rsc, "Options for %s", rsc->id);
+ pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
+ pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
+ pe_rsc_trace(rsc, "\tClone is unique: %s",
+ pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe_rsc_trace(rsc, "\tClone is promotable: %s",
+ pe__rsc_bool_str(rsc, pe_rsc_promotable));
+
+ // Clones may contain a single group or primitive
+ for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
+ a_child = pcmk__xe_next(a_child)) {
+
+ if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) {
+ clone_data->xml_obj_child = a_child;
+ break;
+ }
+ }
+
+ if (clone_data->xml_obj_child == NULL) {
+ pcmk__config_err("%s has nothing to clone", rsc->id);
+ return FALSE;
+ }
+
+ /*
+ * Make clones ever so slightly sticky by default
+ *
+ * This helps ensure clone instances are not shuffled around the cluster
+ * for no benefit in situations when pre-allocation is not appropriate
+ */
+ if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
+ add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
+ }
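+
+    /* For example, a clone with an explicit stickiness keeps its configured
+     * value; only clones without one get the implicit "1" (hypothetical CIB
+     * snippet for illustration):
+     *
+     *   <clone id="myclone-clone">
+     *     <meta_attributes id="myclone-clone-meta">
+     *       <nvpair id="myclone-clone-stickiness"
+     *               name="resource-stickiness" value="100"/>
+     *     </meta_attributes>
+     *   </clone>
+     */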
+
+ /* This ensures that the globally-unique value always exists for children to
+ * inherit when being unpacked, as well as in resource agents' environment.
+ */
+ add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
+ pe__rsc_bool_str(rsc, pe_rsc_unique));
+
+ if (clone_data->clone_max <= 0) {
+        /* Create one child instance so that unpack_find_resource() will hook
+         * any orphans up to the parent correctly.
+ */
+ if (pe__create_clone_child(rsc, data_set) == NULL) {
+ return FALSE;
+ }
+
+ } else {
+ // Create a child instance for each available instance number
+ for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
+ if (pe__create_clone_child(rsc, data_set) == NULL) {
+ return FALSE;
+ }
+ }
+ }
+
+ pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
+ return TRUE;
+}
+
+gboolean
+clone_active(pe_resource_t * rsc, gboolean all)
+{
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ gboolean child_active = child_rsc->fns->active(child_rsc, all);
+
+ if (all == FALSE && child_active) {
+ return TRUE;
+ } else if (all && child_active == FALSE) {
+ return FALSE;
+ }
+ }
+
+ if (all) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+short_print(const char *list, const char *prefix, const char *type,
+ const char *suffix, long options, void *print_data)
+{
+ if(suffix == NULL) {
+ suffix = "";
+ }
+
+ if (!pcmk__str_empty(list)) {
+ if (options & pe_print_html) {
+ status_print("<li>");
+ }
+ status_print("%s%s: [ %s ]%s", prefix, type, list, suffix);
+
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+
+ } else if (options & pe_print_suppres_nl) {
+ /* nothing */
+ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
+ status_print("\n");
+ }
+
+ }
+}
+
+static const char *
+configured_role_str(pe_resource_t * rsc)
+{
+ const char *target_role = g_hash_table_lookup(rsc->meta,
+ XML_RSC_ATTR_TARGET_ROLE);
+
+ if ((target_role == NULL) && rsc->children && rsc->children->data) {
+ target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
+ XML_RSC_ATTR_TARGET_ROLE);
+ }
+ return target_role;
+}
+
+static enum rsc_role_e
+configured_role(pe_resource_t * rsc)
+{
+ const char *target_role = configured_role_str(rsc);
+
+ if (target_role) {
+ return text2role(target_role);
+ }
+ return RSC_ROLE_UNKNOWN;
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ char *child_text = crm_strdup_printf("%s ", pre_text);
+ const char *target_role = configured_role_str(rsc);
+ GList *gIter = rsc->children;
+
+ status_print("%s<clone ", pre_text);
+ status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
+ status_print("multi_state=\"%s\" ",
+ pe__rsc_bool_str(rsc, pe_rsc_promotable));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
+ status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("failure_ignored=\"%s\" ",
+ pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ if (target_role) {
+ status_print("target_role=\"%s\" ", target_role);
+ }
+ status_print(">\n");
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ child_rsc->fns->print(child_rsc, child_text, options, print_data);
+ }
+
+ status_print("%s</clone>\n", pre_text);
+ free(child_text);
+}
+
+bool
+is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
+{
+ GList *gIter;
+ bool all = !any;
+
+ if (pcmk_is_set(rsc->flags, flag)) {
+ if(any) {
+ return TRUE;
+ }
+ } else if(all) {
+ return FALSE;
+ }
+
+ for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ if(is_set_recursive(gIter->data, flag, any)) {
+ if(any) {
+ return TRUE;
+ }
+
+ } else if(all) {
+ return FALSE;
+ }
+ }
+
+ if(all) {
+ return TRUE;
+ }
+ return FALSE;
+}
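+
+/* Reading the two modes (illustrative sketch): with any=true the call asks
+ * "is the flag set on this resource or any descendant?"; with any=false it
+ * asks "is it set on this resource and every descendant?". For example:
+ *
+ *     is_set_recursive(rsc, pe_rsc_failed, TRUE);   // any failure?
+ *     is_set_recursive(rsc, pe_rsc_managed, FALSE); // all managed?
+ */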
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+clone_print(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ GString *list_text = NULL;
+ char *child_text = NULL;
+ GString *stopped_list = NULL;
+
+ GList *promoted_list = NULL;
+ GList *started_list = NULL;
+ GList *gIter = rsc->children;
+
+ clone_variant_data_t *clone_data = NULL;
+ int active_instances = 0;
+
+ if (pre_text == NULL) {
+ pre_text = " ";
+ }
+
+ if (options & pe_print_xml) {
+ clone_print_xml(rsc, pre_text, options, print_data);
+ return;
+ }
+
+ get_clone_variant_data(clone_data, rsc);
+
+ child_text = crm_strdup_printf("%s ", pre_text);
+
+ status_print("%sClone Set: %s [%s]%s%s%s",
+ pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
+ pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
+ pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
+
+ if (options & pe_print_html) {
+ status_print("\n<ul>\n");
+
+ } else if ((options & pe_print_log) == 0) {
+ status_print("\n");
+ }
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ gboolean print_full = FALSE;
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
+
+ if (options & pe_print_clone_details) {
+ print_full = TRUE;
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ // Print individual instance when unique (except stopped orphans)
+ if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ print_full = TRUE;
+ }
+
+ // Everything else in this block is for anonymous clones
+
+ } else if (pcmk_is_set(options, pe_print_pending)
+ && (child_rsc->pending_task != NULL)
+ && strcmp(child_rsc->pending_task, "probe")) {
+ // Print individual instance when non-probe action is pending
+ print_full = TRUE;
+
+ } else if (partially_active == FALSE) {
+ // List stopped instances when requested (except orphans)
+ if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ && !pcmk_is_set(options, pe_print_clone_active)) {
+
+ pcmk__add_word(&stopped_list, 1024, child_rsc->id);
+ }
+
+ } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
+ || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
+ || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+
+ // Print individual instance when active orphaned/unmanaged/failed
+ print_full = TRUE;
+
+ } else if (child_rsc->fns->active(child_rsc, TRUE)) {
+ // Instance of fully active anonymous clone
+
+ pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+
+ if (location) {
+ // Instance is active on a single node
+
+ enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
+
+ if (location->details->online == FALSE && location->details->unclean) {
+ print_full = TRUE;
+
+ } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ promoted_list = g_list_append(promoted_list, location);
+
+ } else {
+ started_list = g_list_append(started_list, location);
+ }
+
+ } else {
+                /* Active, but not on a single node (e.g. an uncolocated
+                 * group), so print in full
+                 */
+ print_full = TRUE;
+ }
+
+ } else {
+ // Instance of partially active anonymous clone
+ print_full = TRUE;
+ }
+
+ if (print_full) {
+ if (options & pe_print_html) {
+ status_print("<li>\n");
+ }
+ child_rsc->fns->print(child_rsc, child_text, options, print_data);
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+ }
+
+ /* Promoted */
+ promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
+ for (gIter = promoted_list; gIter; gIter = gIter->next) {
+ pe_node_t *host = gIter->data;
+
+ pcmk__add_word(&list_text, 1024, host->details->uname);
+ active_instances++;
+ }
+
+ if (list_text != NULL) {
+ short_print((const char *) list_text->str, child_text,
+ PROMOTED_INSTANCES, NULL, options, print_data);
+ g_string_truncate(list_text, 0);
+ }
+ g_list_free(promoted_list);
+
+ /* Started/Unpromoted */
+ started_list = g_list_sort(started_list, pe__cmp_node_name);
+ for (gIter = started_list; gIter; gIter = gIter->next) {
+ pe_node_t *host = gIter->data;
+
+ pcmk__add_word(&list_text, 1024, host->details->uname);
+ active_instances++;
+ }
+
+ if (list_text != NULL) {
+ if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ enum rsc_role_e role = configured_role(rsc);
+
+ if (role == RSC_ROLE_UNPROMOTED) {
+ short_print((const char *) list_text->str, child_text,
+ UNPROMOTED_INSTANCES " (target-role)", NULL,
+ options, print_data);
+ } else {
+ short_print((const char *) list_text->str, child_text,
+ UNPROMOTED_INSTANCES, NULL, options, print_data);
+ }
+
+ } else {
+ short_print((const char *) list_text->str, child_text, "Started",
+ NULL, options, print_data);
+ }
+ }
+
+ g_list_free(started_list);
+
+ if (!pcmk_is_set(options, pe_print_clone_active)) {
+ const char *state = "Stopped";
+ enum rsc_role_e role = configured_role(rsc);
+
+ if (role == RSC_ROLE_STOPPED) {
+ state = "Stopped (disabled)";
+ }
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ && (clone_data->clone_max > active_instances)) {
+
+ GList *nIter;
+ GList *list = g_hash_table_get_values(rsc->allowed_nodes);
+
+ /* Custom stopped list for non-unique clones */
+ if (stopped_list != NULL) {
+ g_string_truncate(stopped_list, 0);
+ }
+
+ if (list == NULL) {
+                /* Clusters with symmetric-cluster=false haven't calculated
+                 * allowed_nodes yet. If we haven't probed for them yet, the
+                 * Stopped list will be empty.
+                 */
+ list = g_hash_table_get_values(rsc->known_on);
+ }
+
+ list = g_list_sort(list, pe__cmp_node_name);
+ for (nIter = list; nIter != NULL; nIter = nIter->next) {
+ pe_node_t *node = (pe_node_t *)nIter->data;
+
+ if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
+ pcmk__add_word(&stopped_list, 1024, node->details->uname);
+ }
+ }
+ g_list_free(list);
+ }
+
+ if (stopped_list != NULL) {
+ short_print((const char *) stopped_list->str, child_text, state,
+ NULL, options, print_data);
+ }
+ }
+
+ if (options & pe_print_html) {
+ status_print("</ul>\n");
+ }
+
+ if (list_text != NULL) {
+ g_string_free(list_text, TRUE);
+ }
+
+ if (stopped_list != NULL) {
+ g_string_free(stopped_list, TRUE);
+ }
+ free(child_text);
+}
+
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__clone_xml(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const char *desc = NULL;
+ GList *gIter = rsc->children;
+ GList *all = NULL;
+ int rc = pcmk_rc_no_output;
+ gboolean printed_header = FALSE;
+ gboolean print_everything = TRUE;
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
+
+ all = g_list_prepend(all, (gpointer) "*");
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
+ continue;
+ }
+
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
+ if (!printed_header) {
+ printed_header = TRUE;
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ rc = pe__name_and_nvpairs_xml(out, true, "clone", 10,
+ "id", rsc->id,
+ "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
+ "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
+ "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
+ "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "target_role", configured_role_str(rsc),
+ "description", desc);
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
+ out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
+ child_rsc, only_node, all);
+ }
+
+ if (printed_header) {
+ pcmk__output_xml_pop_parent(out);
+ }
+
+ g_list_free(all);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__clone_default(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ GHashTable *stopped = NULL;
+
+ GString *list_text = NULL;
+
+ GList *promoted_list = NULL;
+ GList *started_list = NULL;
+ GList *gIter = rsc->children;
+
+ const char *desc = NULL;
+
+ clone_variant_data_t *clone_data = NULL;
+ int active_instances = 0;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ get_clone_variant_data(clone_data, rsc);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ gboolean print_full = FALSE;
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
+
+ if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
+ continue;
+ }
+
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
+ print_full = TRUE;
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ // Print individual instance when unique (except stopped orphans)
+ if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ print_full = TRUE;
+ }
+
+ // Everything else in this block is for anonymous clones
+
+ } else if (pcmk_is_set(show_opts, pcmk_show_pending)
+ && (child_rsc->pending_task != NULL)
+ && strcmp(child_rsc->pending_task, "probe")) {
+ // Print individual instance when non-probe action is pending
+ print_full = TRUE;
+
+ } else if (partially_active == FALSE) {
+ // List stopped instances when requested (except orphans)
+ if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ && !pcmk_is_set(show_opts, pcmk_show_clone_detail)
+ && pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ if (stopped == NULL) {
+ stopped = pcmk__strkey_table(free, free);
+ }
+ g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped"));
+ }
+
+ } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
+ || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
+ || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+
+ // Print individual instance when active orphaned/unmanaged/failed
+ print_full = TRUE;
+
+ } else if (child_rsc->fns->active(child_rsc, TRUE)) {
+ // Instance of fully active anonymous clone
+
+ pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+
+ if (location) {
+ // Instance is active on a single node
+
+ enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
+
+ if (location->details->online == FALSE && location->details->unclean) {
+ print_full = TRUE;
+
+ } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ promoted_list = g_list_append(promoted_list, location);
+
+ } else {
+ started_list = g_list_append(started_list, location);
+ }
+
+ } else {
+                /* Active, but not on a single node (e.g. an uncolocated
+                 * group), so print in full
+                 */
+ print_full = TRUE;
+ }
+
+ } else {
+ // Instance of partially active anonymous clone
+ print_full = TRUE;
+ }
+
+ if (print_full) {
+ GList *all = NULL;
+
+ clone_header(out, &rc, rsc, clone_data, desc);
+
+ /* Print every resource that's a child of this clone. */
+ all = g_list_prepend(all, (gpointer) "*");
+ out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
+ child_rsc, only_node, all);
+ g_list_free(all);
+ }
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) {
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return pcmk_rc_ok;
+ }
+
+ /* Promoted */
+ promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
+ for (gIter = promoted_list; gIter; gIter = gIter->next) {
+ pe_node_t *host = gIter->data;
+
+ if (!pcmk__str_in_list(host->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ pcmk__add_word(&list_text, 1024, host->details->uname);
+ active_instances++;
+ }
+ g_list_free(promoted_list);
+
+ if ((list_text != NULL) && (list_text->len > 0)) {
+ clone_header(out, &rc, rsc, clone_data, desc);
+
+ out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]",
+ (const char *) list_text->str);
+ g_string_truncate(list_text, 0);
+ }
+
+ /* Started/Unpromoted */
+ started_list = g_list_sort(started_list, pe__cmp_node_name);
+ for (gIter = started_list; gIter; gIter = gIter->next) {
+ pe_node_t *host = gIter->data;
+
+ if (!pcmk__str_in_list(host->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ pcmk__add_word(&list_text, 1024, host->details->uname);
+ active_instances++;
+ }
+ g_list_free(started_list);
+
+ if ((list_text != NULL) && (list_text->len > 0)) {
+ clone_header(out, &rc, rsc, clone_data, desc);
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ enum rsc_role_e role = configured_role(rsc);
+
+ if (role == RSC_ROLE_UNPROMOTED) {
+ out->list_item(out, NULL,
+ UNPROMOTED_INSTANCES " (target-role): [ %s ]",
+ (const char *) list_text->str);
+ } else {
+ out->list_item(out, NULL, UNPROMOTED_INSTANCES ": [ %s ]",
+ (const char *) list_text->str);
+ }
+
+ } else {
+ out->list_item(out, NULL, "Started: [ %s ]",
+ (const char *) list_text->str);
+ }
+ }
+
+ if (list_text != NULL) {
+ g_string_free(list_text, TRUE);
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ && (clone_data->clone_max > active_instances)) {
+
+ GList *nIter;
+ GList *list = g_hash_table_get_values(rsc->allowed_nodes);
+
+ /* Custom stopped table for non-unique clones */
+ if (stopped != NULL) {
+ g_hash_table_destroy(stopped);
+ stopped = NULL;
+ }
+
+ if (list == NULL) {
+            /* Clusters with symmetric-cluster=false haven't calculated
+             * allowed_nodes yet. If we haven't probed for them yet, the
+             * Stopped list will be empty.
+             */
+ list = g_hash_table_get_values(rsc->known_on);
+ }
+
+ list = g_list_sort(list, pe__cmp_node_name);
+ for (nIter = list; nIter != NULL; nIter = nIter->next) {
+ pe_node_t *node = (pe_node_t *)nIter->data;
+
+ if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
+ pcmk__str_in_list(node->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname);
+ const char *state = "Stopped";
+
+ if (configured_role(rsc) == RSC_ROLE_STOPPED) {
+ state = "Stopped (disabled)";
+ }
+
+ if (stopped == NULL) {
+ stopped = pcmk__strkey_table(free, free);
+ }
+ if (probe_op != NULL) {
+ int rc;
+
+ pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0);
+ g_hash_table_insert(stopped, strdup(node->details->uname),
+ crm_strdup_printf("Stopped (%s)", services_ocf_exitcode_str(rc)));
+ } else {
+ g_hash_table_insert(stopped, strdup(node->details->uname),
+ strdup(state));
+ }
+ }
+ }
+ g_list_free(list);
+ }
+
+ if (stopped != NULL) {
+ GList *list = sorted_hash_table_values(stopped);
+
+ clone_header(out, &rc, rsc, clone_data, desc);
+
+ for (GList *status_iter = list; status_iter != NULL; status_iter = status_iter->next) {
+ const char *status = status_iter->data;
+ GList *nodes = nodes_with_status(stopped, status);
+ GString *nodes_str = node_list_to_str(nodes);
+
+ if (nodes_str != NULL) {
+ if (nodes_str->len > 0) {
+ out->list_item(out, NULL, "%s: [ %s ]", status,
+ (const char *) nodes_str->str);
+ }
+ g_string_free(nodes_str, TRUE);
+ }
+
+ g_list_free(nodes);
+ }
+
+ g_list_free(list);
+ g_hash_table_destroy(stopped);
+
+ /* If there are no instances of this clone (perhaps because there are no
+ * nodes configured), simply output the clone header by itself. This can
+ * come up in PCS testing.
+ */
+ } else if (active_instances == 0) {
+ clone_header(out, &rc, rsc, clone_data, desc);
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+void
+clone_free(pe_resource_t * rsc)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+
+ pe_rsc_trace(rsc, "Freeing %s", rsc->id);
+
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ CRM_ASSERT(child_rsc);
+ pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
+ free_xml(child_rsc->xml);
+ child_rsc->xml = NULL;
+ /* There could be a saved unexpanded xml */
+ free_xml(child_rsc->orig_xml);
+ child_rsc->orig_xml = NULL;
+ child_rsc->fns->free(child_rsc);
+ }
+
+ g_list_free(rsc->children);
+
+ if (clone_data) {
+ CRM_ASSERT(clone_data->demote_notify == NULL);
+ CRM_ASSERT(clone_data->stop_notify == NULL);
+ CRM_ASSERT(clone_data->start_notify == NULL);
+ CRM_ASSERT(clone_data->promote_notify == NULL);
+ }
+
+ common_free(rsc);
+}
+
+enum rsc_role_e
+clone_resource_state(const pe_resource_t * rsc, gboolean current)
+{
+ enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
+
+ if (a_role > clone_role) {
+ clone_role = a_role;
+ }
+ }
+
+ pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
+ return clone_role;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a clone has an instance for every node
+ *
+ * \param[in] rsc Clone to check
+ * \param[in] data_set Cluster state
+ */
+bool
+pe__is_universal_clone(const pe_resource_t *rsc,
+ const pe_working_set_t *data_set)
+{
+ if (pe_rsc_is_clone(rsc)) {
+ clone_variant_data_t *clone_data = rsc->variant_opaque;
+
+ if (clone_data->clone_max == g_list_length(data_set->nodes)) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+gboolean
+pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+ gboolean check_parent)
+{
+ gboolean passes = FALSE;
+ clone_variant_data_t *clone_data = NULL;
+
+ if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
+ passes = TRUE;
+ } else {
+ get_clone_variant_data(clone_data, rsc);
+ passes = pcmk__str_in_list(ID(clone_data->xml_obj_child), only_rsc, pcmk__str_star_matches);
+
+ if (!passes) {
+ for (const GList *iter = rsc->children;
+ iter != NULL; iter = iter->next) {
+
+ const pe_resource_t *child_rsc = NULL;
+
+ child_rsc = (const pe_resource_t *) iter->data;
+ if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+ }
+ return !passes;
+}
+
+const char *
+pe__clone_child_id(const pe_resource_t *rsc)
+{
+ clone_variant_data_t *clone_data = NULL;
+ get_clone_variant_data(clone_data, rsc);
+ return ID(clone_data->xml_obj_child);
+}
+
+/*!
+ * \internal
+ * \brief Check whether a clone is ordered
+ *
+ * \param[in] clone Clone resource to check
+ *
+ * \return true if clone is ordered, otherwise false
+ */
+bool
+pe__clone_is_ordered(const pe_resource_t *clone)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+ return pcmk_is_set(clone_data->flags, pe__clone_ordered);
+}
+
+/*!
+ * \internal
+ * \brief Set a clone flag
+ *
+ * \param[in,out] clone Clone resource to set flag for
+ * \param[in] flag Clone flag to set
+ *
+ * \return Standard Pacemaker return code (either pcmk_rc_ok if flag was not
+ * already set or pcmk_rc_already if it was)
+ */
+int
+pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+ if (pcmk_is_set(clone_data->flags, flag)) {
+ return pcmk_rc_already;
+ }
+ clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
+ "Clone", clone->id,
+ clone_data->flags, flag, "flag");
+ return pcmk_rc_ok;
+}
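+
+/* Because the return code distinguishes the first transition, callers can
+ * trigger one-time setup only when the flag was newly set (sketch):
+ *
+ *     if (pe__set_clone_flag(clone, pe__clone_ordered) == pcmk_rc_ok) {
+ *         // flag was not set before; do one-time work here
+ *     }
+ */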
+
+/*!
+ * \internal
+ * \brief Create pseudo-actions needed for promotable clones
+ *
+ * \param[in,out] clone Promotable clone to create actions for
+ * \param[in] any_promoting Whether any instances will be promoted
+ * \param[in] any_demoting Whether any instance will be demoted
+ */
+void
+pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
+ bool any_demoting)
+{
+ pe_action_t *action = NULL;
+ pe_action_t *action_complete = NULL;
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+
+ // Create a "promote" action for the clone itself
+ action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting,
+ true);
+
+ // Create a "promoted" action for when all promotions are done
+ action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED,
+ !any_promoting, true);
+ action_complete->priority = INFINITY;
+
+ // Create notification pseudo-actions for promotion
+ if (clone_data->promote_notify == NULL) {
+ clone_data->promote_notify = pe__action_notif_pseudo_ops(clone,
+ RSC_PROMOTE,
+ action,
+ action_complete);
+ }
+
+ // Create a "demote" action for the clone itself
+ action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true);
+
+ // Create a "demoted" action for when all demotions are done
+ action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED,
+ !any_demoting, true);
+ action_complete->priority = INFINITY;
+
+ // Create notification pseudo-actions for demotion
+ if (clone_data->demote_notify == NULL) {
+ clone_data->demote_notify = pe__action_notif_pseudo_ops(clone,
+ RSC_DEMOTE,
+ action,
+ action_complete);
+
+ if (clone_data->promote_notify != NULL) {
+ order_actions(clone_data->stop_notify->post_done,
+ clone_data->promote_notify->pre,
+ pe_order_optional);
+ order_actions(clone_data->start_notify->post_done,
+ clone_data->promote_notify->pre,
+ pe_order_optional);
+ order_actions(clone_data->demote_notify->post_done,
+ clone_data->promote_notify->pre,
+ pe_order_optional);
+ order_actions(clone_data->demote_notify->post_done,
+ clone_data->start_notify->pre,
+ pe_order_optional);
+ order_actions(clone_data->demote_notify->post_done,
+ clone_data->stop_notify->pre,
+ pe_order_optional);
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create all notification data and actions for a clone
+ *
+ * \param[in,out] clone Clone to create notifications for
+ */
+void
+pe__create_clone_notifications(pe_resource_t *clone)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+
+ pe__create_action_notifications(clone, clone_data->start_notify);
+ pe__create_action_notifications(clone, clone_data->stop_notify);
+ pe__create_action_notifications(clone, clone_data->promote_notify);
+ pe__create_action_notifications(clone, clone_data->demote_notify);
+}
+
+/*!
+ * \internal
+ * \brief Free all notification data for a clone
+ *
+ * \param[in,out] clone Clone to free notification data for
+ */
+void
+pe__free_clone_notification_data(pe_resource_t *clone)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+
+ pe__free_action_notification_data(clone_data->demote_notify);
+ clone_data->demote_notify = NULL;
+
+ pe__free_action_notification_data(clone_data->stop_notify);
+ clone_data->stop_notify = NULL;
+
+ pe__free_action_notification_data(clone_data->start_notify);
+ clone_data->start_notify = NULL;
+
+ pe__free_action_notification_data(clone_data->promote_notify);
+ clone_data->promote_notify = NULL;
+}
+
+/*!
+ * \internal
+ * \brief Create pseudo-actions for clone start/stop notifications
+ *
+ * \param[in,out] clone Clone to create pseudo-actions for
+ * \param[in,out] start Start action for \p clone
+ * \param[in,out] stop Stop action for \p clone
+ * \param[in,out] started Started action for \p clone
+ * \param[in,out] stopped Stopped action for \p clone
+ */
+void
+pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
+ pe_action_t *start, pe_action_t *started,
+ pe_action_t *stop, pe_action_t *stopped)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+
+ if (clone_data->start_notify == NULL) {
+ clone_data->start_notify = pe__action_notif_pseudo_ops(clone, RSC_START,
+ start, started);
+ }
+
+ if (clone_data->stop_notify == NULL) {
+ clone_data->stop_notify = pe__action_notif_pseudo_ops(clone, RSC_STOP,
+ stop, stopped);
+ if ((clone_data->start_notify != NULL)
+ && (clone_data->stop_notify != NULL)) {
+ order_actions(clone_data->stop_notify->post_done,
+ clone_data->start_notify->pre, pe_order_optional);
+ }
+ }
+}
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
new file mode 100644
index 0000000..6c69bfc
--- /dev/null
+++ b/lib/pengine/common.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright 2004-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/util.h>
+
+#include <glib.h>
+
+#include <crm/pengine/internal.h>
+
+gboolean was_processing_error = FALSE;
+gboolean was_processing_warning = FALSE;
+
+static bool
+check_placement_strategy(const char *value)
+{
+ return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
+ "balanced", NULL);
+}
+
+static pcmk__cluster_option_t pe_opts[] = {
+ /* name, old name, type, allowed values,
+ * default value, validator,
+ * short description,
+ * long description
+ */
+ {
+ "no-quorum-policy", NULL, "select", "stop, freeze, ignore, demote, suicide",
+ "stop", pcmk__valid_quorum,
+ N_("What to do when the cluster does not have quorum"),
+ NULL
+ },
+ {
+ "symmetric-cluster", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("Whether resources can run on any node by default"),
+ NULL
+ },
+ {
+ "maintenance-mode", NULL, "boolean", NULL,
+ "false", pcmk__valid_boolean,
+ N_("Whether the cluster should refrain from monitoring, starting, "
+ "and stopping resources"),
+ NULL
+ },
+ {
+ "start-failure-is-fatal", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("Whether a start failure should prevent a resource from being "
+ "recovered on the same node"),
+ N_("When true, the cluster will immediately ban a resource from a node "
+ "if it fails to start there. When false, the cluster will instead "
+ "check the resource's fail count against its migration-threshold.")
+ },
+ {
+ "enable-startup-probes", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("Whether the cluster should check for active resources during start-up"),
+ NULL
+ },
+ {
+ XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
+ "false", pcmk__valid_boolean,
+ N_("Whether to lock resources to a cleanly shut down node"),
+ N_("When true, resources active on a node when it is cleanly shut down "
+ "are kept \"locked\" to that node (not allowed to run elsewhere) "
+ "until they start again on that node after it rejoins (or for at "
+ "most shutdown-lock-limit, if set). Stonith resources and "
+ "Pacemaker Remote connections are never locked. Clone and bundle "
+ "instances and the promoted role of promotable clones are "
+ "currently never locked, though support could be added in a future "
+ "release.")
+ },
+ {
+ XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("Do not lock resources to a cleanly shut down node longer than "
+ "this"),
+ N_("If shutdown-lock is true and this is set to a nonzero time "
+ "duration, shutdown locks will expire after this much time has "
+ "passed since the shutdown was initiated, even if the node has not "
+ "rejoined.")
+ },
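+
+    /* Options like these are set as cluster properties in the CIB, e.g.
+     * (hypothetical snippet):
+     *
+     *   <cluster_property_set id="cib-bootstrap-options">
+     *     <nvpair id="opt-shutdown-lock" name="shutdown-lock" value="true"/>
+     *     <nvpair id="opt-shutdown-lock-limit" name="shutdown-lock-limit"
+     *             value="10m"/>
+     *   </cluster_property_set>
+     */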
+
+ // Fencing-related options
+ {
+ "stonith-enabled", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("*** Advanced Use Only *** "
+ "Whether nodes may be fenced as part of recovery"),
+ N_("If false, unresponsive nodes are immediately assumed to be harmless, "
+ "and resources that were active on them may be recovered "
+ "elsewhere. This can result in a \"split-brain\" situation, "
+ "potentially leading to data loss and/or service unavailability.")
+ },
+ {
+ "stonith-action", NULL, "select", "reboot, off, poweroff",
+ "reboot", pcmk__is_fencing_action,
+ N_("Action to send to fence device when a node needs to be fenced "
+ "(\"poweroff\" is a deprecated alias for \"off\")"),
+ NULL
+ },
+ {
+ "stonith-timeout", NULL, "time", NULL,
+ "60s", pcmk__valid_interval_spec,
+ N_("*** Advanced Use Only *** Unused by Pacemaker"),
+ N_("This value is not used by Pacemaker, but is kept for backward "
+ "compatibility, and certain legacy fence agents might use it.")
+ },
+ {
+ XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL,
+ "false", pcmk__valid_boolean,
+ N_("Whether watchdog integration is enabled"),
+ N_("This is set automatically by the cluster according to whether SBD "
+ "is detected to be in use. User-configured values are ignored. "
+ "The value `true` is meaningful if diskless SBD is used and "
+ "`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
+ "is required, watchdog-based self-fencing will be performed via "
+ "SBD without requiring a fencing resource explicitly configured.")
+ },
+ {
+ "concurrent-fencing", NULL, "boolean", NULL,
+ PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
+ N_("Allow performing fencing operations in parallel"),
+ NULL
+ },
+ {
+ "startup-fencing", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("*** Advanced Use Only *** Whether to fence unseen nodes at start-up"),
+ N_("Setting this to false may lead to a \"split-brain\" situation,"
+ "potentially leading to data loss and/or service unavailability.")
+ },
+ {
+ XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("Apply fencing delay targeting the lost nodes with the highest total resource priority"),
+ N_("Apply specified delay for the fencings that are targeting the lost "
+ "nodes with the highest total resource priority in case we don't "
+ "have the majority of the nodes in our cluster partition, so that "
+ "the more significant nodes potentially win any fencing match, "
+ "which is especially meaningful under split-brain of 2-node "
+ "cluster. A promoted resource instance takes the base priority + 1 "
+ "on calculation if the base priority is not 0. Any static/random "
+ "delays that are introduced by `pcmk_delay_base/max` configured "
+ "for the corresponding fencing resources will be added to this "
+ "delay. This delay should be significantly greater than, safely "
+ "twice, the maximum `pcmk_delay_base/max`. By default, priority "
+ "fencing delay is disabled.")
+ },
+
+ {
+ "cluster-delay", NULL, "time", NULL,
+ "60s", pcmk__valid_interval_spec,
+ N_("Maximum time for node-to-node communication"),
+ N_("The node elected Designated Controller (DC) will consider an action "
+ "failed if it does not get a response from the node executing the "
+ "action within this time (after considering the action's own "
+ "timeout). The \"correct\" value will depend on the speed and "
+ "load of your network and cluster nodes.")
+ },
+ {
+ "batch-limit", NULL, "integer", NULL,
+ "0", pcmk__valid_number,
+ N_("Maximum number of jobs that the cluster may execute in parallel "
+ "across all nodes"),
+ N_("The \"correct\" value will depend on the speed and load of your "
+ "network and cluster nodes. If set to 0, the cluster will "
+ "impose a dynamically calculated limit when any node has a "
+ "high load.")
+ },
+ {
+ "migration-limit", NULL, "integer", NULL,
+ "-1", pcmk__valid_number,
+ N_("The number of live migration actions that the cluster is allowed "
+ "to execute in parallel on a node (-1 means no limit)")
+ },
+
+ /* Orphans and stopping */
+ {
+ "stop-all-resources", NULL, "boolean", NULL,
+ "false", pcmk__valid_boolean,
+ N_("Whether the cluster should stop all active resources"),
+ NULL
+ },
+ {
+ "stop-orphan-resources", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("Whether to stop resources that were removed from the configuration"),
+ NULL
+ },
+ {
+ "stop-orphan-actions", NULL, "boolean", NULL,
+ "true", pcmk__valid_boolean,
+ N_("Whether to cancel recurring actions removed from the configuration"),
+ NULL
+ },
+ {
+ "remove-after-stop", NULL, "boolean", NULL,
+ "false", pcmk__valid_boolean,
+ N_("*** Deprecated *** Whether to remove stopped resources from "
+ "the executor"),
+ N_("Values other than default are poorly tested and potentially dangerous."
+ " This option will be removed in a future release.")
+ },
+
+ /* Storing inputs */
+ {
+ "pe-error-series-max", NULL, "integer", NULL,
+ "-1", pcmk__valid_number,
+ N_("The number of scheduler inputs resulting in errors to save"),
+ N_("Zero to disable, -1 to store unlimited.")
+ },
+ {
+ "pe-warn-series-max", NULL, "integer", NULL,
+ "5000", pcmk__valid_number,
+ N_("The number of scheduler inputs resulting in warnings to save"),
+ N_("Zero to disable, -1 to store unlimited.")
+ },
+ {
+ "pe-input-series-max", NULL, "integer", NULL,
+ "4000", pcmk__valid_number,
+ N_("The number of scheduler inputs without errors or warnings to save"),
+ N_("Zero to disable, -1 to store unlimited.")
+ },
+
+ /* Node health */
+ {
+ PCMK__OPT_NODE_HEALTH_STRATEGY, NULL, "select",
+ PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", "
+ PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", "
+ PCMK__VALUE_CUSTOM,
+ PCMK__VALUE_NONE, pcmk__validate_health_strategy,
+ N_("How cluster should react to node health attributes"),
+ N_("Requires external entities to create node attributes (named with "
+ "the prefix \"#health\") with values \"red\", "
+ "\"yellow\", or \"green\".")
+ },
+ {
+ PCMK__OPT_NODE_HEALTH_BASE, NULL, "integer", NULL,
+ "0", pcmk__valid_number,
+ N_("Base health score assigned to a node"),
+ N_("Only used when \"node-health-strategy\" is set to \"progressive\".")
+ },
+ {
+ PCMK__OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL,
+ "0", pcmk__valid_number,
+ N_("The score to use for a node health attribute whose value is \"green\""),
+ N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
+ },
+ {
+ PCMK__OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL,
+ "0", pcmk__valid_number,
+ N_("The score to use for a node health attribute whose value is \"yellow\""),
+ N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
+ },
+ {
+ PCMK__OPT_NODE_HEALTH_RED, NULL, "integer", NULL,
+ "-INFINITY", pcmk__valid_number,
+ N_("The score to use for a node health attribute whose value is \"red\""),
+ N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
+ },
+
+    /* Placement strategy */
+ {
+ "placement-strategy", NULL, "select",
+ "default, utilization, minimal, balanced",
+ "default", check_placement_strategy,
+ N_("How the cluster should allocate resources to nodes"),
+ NULL
+ },
+};
+
+void
+pe_metadata(pcmk__output_t *out)
+{
+ const char *desc_short = "Pacemaker scheduler options";
+ const char *desc_long = "Cluster options used by Pacemaker's scheduler";
+
+ gchar *s = pcmk__format_option_metadata("pacemaker-schedulerd", desc_short,
+ desc_long, pe_opts,
+ PCMK__NELEM(pe_opts));
+ out->output_xml(out, "metadata", s);
+ g_free(s);
+}
+
+void
+verify_pe_options(GHashTable * options)
+{
+ pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts));
+}
+
+const char *
+pe_pref(GHashTable * options, const char *name)
+{
+ return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name);
+}
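+
+/* Illustrative sketch (editorial addition, not part of the upstream file):
+ * a hypothetical caller querying a scheduler option. With nothing configured
+ * in the table, pe_pref() falls back to the built-in default from pe_opts.
+ *
+ *     GHashTable *options = pcmk__strkey_table(free, free);
+ *
+ *     verify_pe_options(options); // warns about unrecognized values
+ *     const char *action = pe_pref(options, "stonith-action");
+ *     // action is now "reboot", the default defined above
+ */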
+
+const char *
+fail2text(enum action_fail_response fail)
+{
+ const char *result = "<unknown>";
+
+ switch (fail) {
+ case action_fail_ignore:
+ result = "ignore";
+ break;
+ case action_fail_demote:
+ result = "demote";
+ break;
+ case action_fail_block:
+ result = "block";
+ break;
+ case action_fail_recover:
+ result = "recover";
+ break;
+ case action_fail_migrate:
+ result = "migrate";
+ break;
+ case action_fail_stop:
+ result = "stop";
+ break;
+ case action_fail_fence:
+ result = "fence";
+ break;
+ case action_fail_standby:
+ result = "standby";
+ break;
+ case action_fail_restart_container:
+ result = "restart-container";
+ break;
+ case action_fail_reset_remote:
+ result = "reset-remote";
+ break;
+ }
+ return result;
+}
+
+enum action_tasks
+text2task(const char *task)
+{
+ if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ return stop_rsc;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
+ return stopped_rsc;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
+ return start_rsc;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
+ return started_rsc;
+ } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ return shutdown_crm;
+ } else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
+ return stonith_node;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ return monitor_rsc;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
+ return action_notify;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
+ return action_notified;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ return action_promote;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
+ return action_demote;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
+ return action_promoted;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
+ return action_demoted;
+ }
+#if SUPPORT_TRACING
+ if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
+ return no_action;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
+ return no_action;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ return no_action;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ return no_action;
+ } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
+ return no_action;
+ }
+ crm_trace("Unsupported action: %s", task);
+#endif
+
+ return no_action;
+}
+
+const char *
+task2text(enum action_tasks task)
+{
+ const char *result = "<unknown>";
+
+ switch (task) {
+ case no_action:
+ result = "no_action";
+ break;
+ case stop_rsc:
+ result = CRMD_ACTION_STOP;
+ break;
+ case stopped_rsc:
+ result = CRMD_ACTION_STOPPED;
+ break;
+ case start_rsc:
+ result = CRMD_ACTION_START;
+ break;
+ case started_rsc:
+ result = CRMD_ACTION_STARTED;
+ break;
+ case shutdown_crm:
+ result = CRM_OP_SHUTDOWN;
+ break;
+ case stonith_node:
+ result = CRM_OP_FENCE;
+ break;
+ case monitor_rsc:
+ result = CRMD_ACTION_STATUS;
+ break;
+ case action_notify:
+ result = CRMD_ACTION_NOTIFY;
+ break;
+ case action_notified:
+ result = CRMD_ACTION_NOTIFIED;
+ break;
+ case action_promote:
+ result = CRMD_ACTION_PROMOTE;
+ break;
+ case action_promoted:
+ result = CRMD_ACTION_PROMOTED;
+ break;
+ case action_demote:
+ result = CRMD_ACTION_DEMOTE;
+ break;
+ case action_demoted:
+ result = CRMD_ACTION_DEMOTED;
+ break;
+ }
+
+ return result;
+}
+
+const char *
+role2text(enum rsc_role_e role)
+{
+ switch (role) {
+ case RSC_ROLE_UNKNOWN:
+ return RSC_ROLE_UNKNOWN_S;
+ case RSC_ROLE_STOPPED:
+ return RSC_ROLE_STOPPED_S;
+ case RSC_ROLE_STARTED:
+ return RSC_ROLE_STARTED_S;
+ case RSC_ROLE_UNPROMOTED:
+#ifdef PCMK__COMPAT_2_0
+ return RSC_ROLE_UNPROMOTED_LEGACY_S;
+#else
+ return RSC_ROLE_UNPROMOTED_S;
+#endif
+ case RSC_ROLE_PROMOTED:
+#ifdef PCMK__COMPAT_2_0
+ return RSC_ROLE_PROMOTED_LEGACY_S;
+#else
+ return RSC_ROLE_PROMOTED_S;
+#endif
+ }
+ CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
+ CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
+ // coverity[dead_error_line]
+ return RSC_ROLE_UNKNOWN_S;
+}
+
+enum rsc_role_e
+text2role(const char *role)
+{
+ CRM_ASSERT(role != NULL);
+ if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
+ return RSC_ROLE_STOPPED;
+ } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
+ return RSC_ROLE_STARTED;
+ } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
+ RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
+ return RSC_ROLE_UNPROMOTED;
+ } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
+ RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
+ return RSC_ROLE_PROMOTED;
+ } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
+ return RSC_ROLE_UNKNOWN;
+ }
+ crm_err("Unknown role: %s", role);
+ return RSC_ROLE_UNKNOWN;
+}
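+
+/* Illustrative sketch (editorial addition): role2text() and text2role() are
+ * designed to round-trip, so converting a role to text and back yields the
+ * same role:
+ *
+ *     enum rsc_role_e role = text2role(RSC_ROLE_PROMOTED_S);
+ *
+ *     CRM_ASSERT(role == RSC_ROLE_PROMOTED);
+ *     CRM_ASSERT(text2role(role2text(role)) == role);
+ *
+ * Unrecognized strings are logged and mapped to RSC_ROLE_UNKNOWN rather than
+ * treated as fatal errors.
+ */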
+
+void
+add_hash_param(GHashTable * hash, const char *name, const char *value)
+{
+ CRM_CHECK(hash != NULL, return);
+
+ crm_trace("Adding name='%s' value='%s' to hash table",
+ pcmk__s(name, "<null>"), pcmk__s(value, "<null>"));
+ if (name == NULL || value == NULL) {
+ return;
+
+ } else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
+ return;
+
+ } else if (g_hash_table_lookup(hash, name) == NULL) {
+ g_hash_table_insert(hash, strdup(name), strdup(value));
+ }
+}
+
+const char *
+pe_node_attribute_calculated(const pe_node_t *node, const char *name,
+ const pe_resource_t *rsc)
+{
+ const char *source;
+
+ if(node == NULL) {
+ return NULL;
+
+ } else if(rsc == NULL) {
+ return g_hash_table_lookup(node->details->attrs, name);
+ }
+
+ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+ if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
+ return g_hash_table_lookup(node->details->attrs, name);
+ }
+
+    /* Use attributes set for the container's location
+     * instead of for the container itself
+     *
+     * Useful when the container is using the host's local
+     * storage
+     */
+
+ CRM_ASSERT(node->details->remote_rsc);
+ CRM_ASSERT(node->details->remote_rsc->container);
+
+ if(node->details->remote_rsc->container->running_on) {
+ pe_node_t *host = node->details->remote_rsc->container->running_on->data;
+ pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s",
+ rsc->id, name, pe__node_name(host));
+ return g_hash_table_lookup(host->details->attrs, name);
+ }
+
+ pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive",
+ rsc->id, name, node->details->remote_rsc->container->id);
+ return NULL;
+}
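+
+/* Illustrative sketch (editorial addition): for a guest node whose container
+ * resource sets container-attribute-target="host", the lookup above is
+ * redirected to the node currently hosting the container. The variables in
+ * this hypothetical call are not defined in this file:
+ *
+ *     // Looks up "datadir" on the container's host rather than on the
+ *     // guest node itself (returns NULL if the container is inactive):
+ *     const char *v = pe_node_attribute_calculated(guest_node, "datadir",
+ *                                                  guest_rsc);
+ */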
+
+const char *
+pe_node_attribute_raw(const pe_node_t *node, const char *name)
+{
+ if(node == NULL) {
+ return NULL;
+ }
+ return g_hash_table_lookup(node->details->attrs, name);
+}
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
new file mode 100644
index 0000000..f168124
--- /dev/null
+++ b/lib/pengine/complex.c
@@ -0,0 +1,1174 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/pengine/rules.h>
+#include <crm/pengine/internal.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml_internal.h>
+
+#include "pe_status_private.h"
+
+void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
+
+static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean);
+
+resource_object_functions_t resource_class_functions[] = {
+ {
+ native_unpack,
+ native_find_rsc,
+ native_parameter,
+ native_print,
+ native_active,
+ native_resource_state,
+ native_location,
+ native_free,
+ pe__count_common,
+ pe__native_is_filtered,
+ active_node,
+ },
+ {
+ group_unpack,
+ native_find_rsc,
+ native_parameter,
+ group_print,
+ group_active,
+ group_resource_state,
+ native_location,
+ group_free,
+ pe__count_common,
+ pe__group_is_filtered,
+ active_node,
+ },
+ {
+ clone_unpack,
+ native_find_rsc,
+ native_parameter,
+ clone_print,
+ clone_active,
+ clone_resource_state,
+ native_location,
+ clone_free,
+ pe__count_common,
+ pe__clone_is_filtered,
+ active_node,
+ },
+ {
+ pe__unpack_bundle,
+ native_find_rsc,
+ native_parameter,
+ pe__print_bundle,
+ pe__bundle_active,
+ pe__bundle_resource_state,
+ native_location,
+ pe__free_bundle,
+ pe__count_bundle,
+ pe__bundle_is_filtered,
+ pe__bundle_active_node,
+ }
+};
+
+static enum pe_obj_types
+get_resource_type(const char *name)
+{
+ if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
+ return pe_native;
+
+ } else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
+ return pe_group;
+
+ } else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
+ return pe_clone;
+
+ } else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
+ // @COMPAT deprecated since 2.0.0
+ return pe_clone;
+
+ } else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
+ return pe_container;
+ }
+
+ return pe_unknown;
+}
+
+static void
+dup_attr(gpointer key, gpointer value, gpointer user_data)
+{
+ add_hash_param(user_data, key, value);
+}
+
+static void
+expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
+{
+ GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
+ pe_resource_t *p = rsc->parent;
+
+ if (p == NULL) {
+        return;
+ }
+
+    /* Walk up through all ancestor resources, collecting the meta_attributes
+     * fixed in each one's original XML into the hash table. Values from
+     * closer ancestors take precedence and are not overwritten. */
+ while(p != NULL) {
+ /* A hash table for comparison is generated, including the id-ref. */
+ pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
+ rule_data, parent_orig_meta, NULL, FALSE, data_set);
+ p = p->parent;
+ }
+
+    /* If any ancestor fixed a value in its meta_attributes, merge it in. */
+ if (parent_orig_meta != NULL) {
+ GHashTableIter iter;
+ char *key = NULL;
+ char *value = NULL;
+
+ g_hash_table_iter_init(&iter, parent_orig_meta);
+ while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
+            /* Each ancestor's original-XML parameters are offered to the
+             * child here; attributes already present in the child's table
+             * are not updated. */
+ dup_attr(key, value, meta_hash);
+ }
+ }
+
+ if (parent_orig_meta != NULL) {
+ g_hash_table_destroy(parent_orig_meta);
+ }
+
+    return;
+}
+
+void
+get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
+ pe_node_t * node, pe_working_set_t * data_set)
+{
+ pe_rsc_eval_data_t rsc_rule_data = {
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
+ };
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = &rsc_rule_data,
+ .op_data = NULL
+ };
+
+ if (node) {
+ rule_data.node_hash = node->details->attrs;
+ }
+
+ for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
+ const char *prop_name = (const char *) a->name;
+ const char *prop_value = crm_element_value(rsc->xml, prop_name);
+
+ add_hash_param(meta_hash, prop_name, prop_value);
+ }
+
+ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
+ meta_hash, NULL, FALSE, data_set);
+
+ /* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
+ /* If it is already explicitly set as a child, it will not be overwritten. */
+ if (rsc->parent != NULL) {
+ expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
+ }
+
+ /* check the defaults */
+ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
+ &rule_data, meta_hash, NULL, FALSE, data_set);
+
+ /* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
+ /* The values already set up to this point will not be overwritten. */
+ if (rsc->parent) {
+ g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
+ }
+}
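+
+/* Illustrative summary (editorial addition): because add_hash_param() and
+ * dup_attr() never overwrite existing entries, the effective precedence for
+ * a meta-attribute such as "target-role" is, from strongest to weakest:
+ *
+ *     1. attributes on the resource's own XML element
+ *     2. the resource's own meta_attributes blocks
+ *     3. meta_attributes fixed in each ancestor's original XML (nearest
+ *        ancestor first)
+ *     4. rsc_defaults
+ *     5. whatever remains in the parent's already-expanded meta table
+ */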
+
+void
+get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
+ const pe_node_t *node, pe_working_set_t *data_set)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ if (node) {
+ rule_data.node_hash = node->details->attrs;
+ }
+
+ pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
+ meta_hash, NULL, FALSE, data_set);
+
+ /* set anything else based on the parent */
+ if (rsc->parent != NULL) {
+ get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
+
+ } else {
+ /* and finally check the defaults */
+ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
+ &rule_data, meta_hash, NULL, FALSE, data_set);
+ }
+}
+
+static char *
+template_op_key(xmlNode * op)
+{
+ const char *name = crm_element_value(op, "name");
+ const char *role = crm_element_value(op, "role");
+ char *key = NULL;
+
+ if ((role == NULL)
+ || pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
+ RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
+ role = RSC_ROLE_UNKNOWN_S;
+ }
+
+ key = crm_strdup_printf("%s-%s", name, role);
+ return key;
+}
+
+static gboolean
+unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
+{
+ xmlNode *cib_resources = NULL;
+ xmlNode *template = NULL;
+ xmlNode *new_xml = NULL;
+ xmlNode *child_xml = NULL;
+ xmlNode *rsc_ops = NULL;
+ xmlNode *template_ops = NULL;
+ const char *template_ref = NULL;
+ const char *clone = NULL;
+ const char *id = NULL;
+
+ if (xml_obj == NULL) {
+ pe_err("No resource object for template unpacking");
+ return FALSE;
+ }
+
+ template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
+ if (template_ref == NULL) {
+ return TRUE;
+ }
+
+ id = ID(xml_obj);
+ if (id == NULL) {
+ pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ return FALSE;
+ }
+
+ if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
+ pe_err("The resource object '%s' should not reference itself", id);
+ return FALSE;
+ }
+
+ cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
+ if (cib_resources == NULL) {
+ pe_err("No resources configured");
+ return FALSE;
+ }
+
+ template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
+ XML_ATTR_ID, template_ref);
+ if (template == NULL) {
+ pe_err("No template named '%s'", template_ref);
+ return FALSE;
+ }
+
+ new_xml = copy_xml(template);
+ xmlNodeSetName(new_xml, xml_obj->name);
+ crm_xml_replace(new_xml, XML_ATTR_ID, id);
+
+ clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
+ if(clone) {
+ crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
+ }
+
+ template_ops = find_xml_node(new_xml, "operations", FALSE);
+
+ for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
+ child_xml = pcmk__xe_next(child_xml)) {
+ xmlNode *new_child = NULL;
+
+ new_child = add_node_copy(new_xml, child_xml);
+
+ if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
+ rsc_ops = new_child;
+ }
+ }
+
+ if (template_ops && rsc_ops) {
+ xmlNode *op = NULL;
+ GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
+
+ for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
+ op = pcmk__xe_next(op)) {
+
+ char *key = template_op_key(op);
+
+ g_hash_table_insert(rsc_ops_hash, key, op);
+ }
+
+ for (op = pcmk__xe_first_child(template_ops); op != NULL;
+ op = pcmk__xe_next(op)) {
+
+ char *key = template_op_key(op);
+
+ if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
+ add_node_copy(rsc_ops, op);
+ }
+
+ free(key);
+ }
+
+ if (rsc_ops_hash) {
+ g_hash_table_destroy(rsc_ops_hash);
+ }
+
+ free_xml(template_ops);
+ }
+
+ /*free_xml(*expanded_xml); */
+ *expanded_xml = new_xml;
+
+ /* Disable multi-level templates for now */
+ /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
+ free_xml(*expanded_xml);
+ *expanded_xml = NULL;
+
+ return FALSE;
+ } */
+
+ return TRUE;
+}
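+
+/* Illustrative sketch (editorial addition): a hypothetical configuration
+ * this expansion handles. Given
+ *
+ *     <template id="web-template" class="ocf" provider="heartbeat"
+ *               type="apache"/>
+ *     <primitive id="web1" template="web-template"/>
+ *
+ * the expanded XML is effectively <primitive id="web1" class="ocf"
+ * provider="heartbeat" type="apache"/>, and any operations defined directly
+ * on web1 take precedence over template operations with the same name and
+ * role.
+ */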
+
+static gboolean
+add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
+{
+ const char *template_ref = NULL;
+ const char *id = NULL;
+
+ if (xml_obj == NULL) {
+ pe_err("No resource object for processing resource list of template");
+ return FALSE;
+ }
+
+ template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
+ if (template_ref == NULL) {
+ return TRUE;
+ }
+
+ id = ID(xml_obj);
+ if (id == NULL) {
+ pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ return FALSE;
+ }
+
+ if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
+ pe_err("The resource object '%s' should not reference itself", id);
+ return FALSE;
+ }
+
+ if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static bool
+detect_promotable(pe_resource_t *rsc)
+{
+ const char *promotable = g_hash_table_lookup(rsc->meta,
+ XML_RSC_ATTR_PROMOTABLE);
+
+ if (crm_is_true(promotable)) {
+ return TRUE;
+ }
+
+ // @COMPAT deprecated since 2.0.0
+ if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
+ pcmk__str_casei)) {
+ /* @TODO in some future version, pe_warn_once() here,
+ * then drop support in even later version
+ */
+ g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
+ strdup(XML_BOOLEAN_TRUE));
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static void
+free_params_table(gpointer data)
+{
+ g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \brief Get a table of resource parameters
+ *
+ * \param[in,out] rsc Resource to query
+ * \param[in] node Node for evaluating rules (NULL for defaults)
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Hash table containing resource parameter names and values
+ * (or NULL if \p rsc or \p data_set is NULL)
+ * \note The returned table will be destroyed when the resource is freed, so
+ * callers should not destroy it.
+ */
+GHashTable *
+pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
+ pe_working_set_t *data_set)
+{
+ GHashTable *params_on_node = NULL;
+
+ /* A NULL node is used to request the resource's default parameters
+ * (not evaluated for node), but we always want something non-NULL
+ * as a hash table key.
+ */
+ const char *node_name = "";
+
+ // Sanity check
+ if ((rsc == NULL) || (data_set == NULL)) {
+ return NULL;
+ }
+ if ((node != NULL) && (node->details->uname != NULL)) {
+ node_name = node->details->uname;
+ }
+
+ // Find the parameter table for given node
+ if (rsc->parameter_cache == NULL) {
+ rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
+ } else {
+ params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
+ }
+
+ // If none exists yet, create one with parameters evaluated for node
+ if (params_on_node == NULL) {
+ params_on_node = pcmk__strkey_table(free, free);
+ get_rsc_attributes(params_on_node, rsc, node, data_set);
+ g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
+ params_on_node);
+ }
+ return params_on_node;
+}
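+
+/* Illustrative sketch (editorial addition): results are cached per node
+ * name, so repeated queries are cheap, and the returned table is owned by
+ * the resource. The variables in this hypothetical call are not defined
+ * here:
+ *
+ *     GHashTable *params = pe_rsc_params(rsc, node, data_set);
+ *     const char *ip = g_hash_table_lookup(params, "ip");
+ *
+ *     // Do NOT destroy params; it is freed along with rsc
+ */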
+
+/*!
+ * \internal
+ * \brief Unpack a resource's "requires" meta-attribute
+ *
+ * \param[in,out] rsc Resource being unpacked
+ * \param[in] value Value of "requires" meta-attribute
+ * \param[in] is_default Whether \p value was selected by default
+ */
+static void
+unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
+{
+ if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
+
+ } else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
+
+ } else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
+ if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ pcmk__config_warn("%s requires fencing but fencing is disabled",
+ rsc->id);
+ }
+
+ } else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
+ if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
+ "to \"" PCMK__VALUE_QUORUM "\" because fencing "
+ "devices cannot require unfencing", rsc->id);
+ unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
+ return;
+
+ } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
+ "to \"" PCMK__VALUE_QUORUM "\" because fencing "
+ "is disabled", rsc->id);
+ unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
+ return;
+
+ } else {
+ pe__set_resource_flags(rsc,
+ pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
+ }
+
+ } else {
+ const char *orig_value = value;
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ value = PCMK__VALUE_QUORUM;
+
+ } else if ((rsc->variant == pe_native)
+ && xml_contains_remote_node(rsc->xml)) {
+ value = PCMK__VALUE_QUORUM;
+
+ } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
+ value = PCMK__VALUE_UNFENCING;
+
+ } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ value = PCMK__VALUE_FENCING;
+
+ } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
+ value = PCMK__VALUE_NOTHING;
+
+ } else {
+ value = PCMK__VALUE_QUORUM;
+ }
+
+ if (orig_value != NULL) {
+ pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
+ "to '%s' because '%s' is not valid",
+ rsc->id, value, orig_value);
+ }
+ unpack_requires(rsc, value, true);
+ return;
+ }
+
+ pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
+ (is_default? " (default)" : ""));
+}
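+
+/* Illustrative summary (editorial addition): when "requires" is unset or
+ * invalid, the default chosen by the code above is, in order:
+ *
+ *     fencing devices and remote connections   -> "quorum"
+ *     cluster-wide unfencing enabled           -> "unfencing"
+ *     stonith-enabled=true                     -> "fencing"
+ *     no-quorum-policy=ignore                  -> "nothing"
+ *     anything else                            -> "quorum"
+ */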
+
+#ifndef PCMK__COMPAT_2_0
+static void
+warn_about_deprecated_classes(pe_resource_t *rsc)
+{
+ const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+
+ if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
+ pe_warn_once(pe_wo_upstart,
+ "Support for Upstart resources (such as %s) is deprecated "
+ "and will be removed in a future release of Pacemaker",
+ rsc->id);
+
+ } else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
+ pe_warn_once(pe_wo_nagios,
+ "Support for Nagios resources (such as %s) is deprecated "
+ "and will be removed in a future release of Pacemaker",
+ rsc->id);
+ }
+}
+#endif
+
+/*!
+ * \internal
+ * \brief Unpack configuration XML for a given resource
+ *
+ * Unpack the XML object containing a resource's configuration into a new
+ * \c pe_resource_t object.
+ *
+ * \param[in] xml_obj XML node containing the resource's configuration
+ * \param[out] rsc Where to store the unpacked resource information
+ * \param[in] parent Resource's parent, if any
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Standard Pacemaker return code
+ * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
+ * the caller is responsible for freeing it using its variant-specific
+ * free() method. Otherwise, \p *rsc is guaranteed to be NULL.
+ */
+int
+pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
+ pe_resource_t *parent, pe_working_set_t *data_set)
+{
+ xmlNode *expanded_xml = NULL;
+ xmlNode *ops = NULL;
+ const char *value = NULL;
+ const char *id = NULL;
+ bool guest_node = false;
+ bool remote_node = false;
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = NULL,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ CRM_CHECK(rsc != NULL, return EINVAL);
+ CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
+ *rsc = NULL;
+ return EINVAL);
+
+ rule_data.now = data_set->now;
+
+ crm_log_xml_trace(xml_obj, "[raw XML]");
+
+ id = crm_element_value(xml_obj, XML_ATTR_ID);
+ if (id == NULL) {
+ pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
+ crm_element_name(xml_obj));
+ return pcmk_rc_unpack_error;
+ }
+
+ if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
+ return pcmk_rc_unpack_error;
+ }
+
+ *rsc = calloc(1, sizeof(pe_resource_t));
+ if (*rsc == NULL) {
+ crm_crit("Unable to allocate memory for resource '%s'", id);
+ return ENOMEM;
+ }
+ (*rsc)->cluster = data_set;
+
+ if (expanded_xml) {
+ crm_log_xml_trace(expanded_xml, "[expanded XML]");
+ (*rsc)->xml = expanded_xml;
+ (*rsc)->orig_xml = xml_obj;
+
+ } else {
+ (*rsc)->xml = xml_obj;
+ (*rsc)->orig_xml = NULL;
+ }
+
+ /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
+
+ (*rsc)->parent = parent;
+
+ ops = find_xml_node((*rsc)->xml, "operations", FALSE);
+ (*rsc)->ops_xml = expand_idref(ops, data_set->input);
+
+ (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
+ if ((*rsc)->variant == pe_unknown) {
+ pe_err("Ignoring resource '%s' of unknown type '%s'",
+ id, crm_element_name((*rsc)->xml));
+ common_free(*rsc);
+ *rsc = NULL;
+ return pcmk_rc_unpack_error;
+ }
+
+#ifndef PCMK__COMPAT_2_0
+ warn_about_deprecated_classes(*rsc);
+#endif
+
+ (*rsc)->meta = pcmk__strkey_table(free, free);
+ (*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
+ (*rsc)->known_on = pcmk__strkey_table(NULL, free);
+
+ value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
+ if (value) {
+ (*rsc)->id = crm_strdup_printf("%s:%s", id, value);
+ add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
+
+ } else {
+ (*rsc)->id = strdup(id);
+ }
+
+ (*rsc)->fns = &resource_class_functions[(*rsc)->variant];
+
+ get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
+ (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
+
+ (*rsc)->flags = 0;
+ pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ pe__set_resource_flags(*rsc, pe_rsc_managed);
+ }
+
+ (*rsc)->rsc_cons = NULL;
+ (*rsc)->rsc_tickets = NULL;
+ (*rsc)->actions = NULL;
+ (*rsc)->role = RSC_ROLE_STOPPED;
+ (*rsc)->next_role = RSC_ROLE_UNKNOWN;
+
+ (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->stickiness = 0;
+ (*rsc)->migration_threshold = INFINITY;
+ (*rsc)->failure_timeout = 0;
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
+ (*rsc)->priority = char2score(value);
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
+ if ((value == NULL) || crm_is_true(value)) {
+ pe__set_resource_flags(*rsc, pe_rsc_critical);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
+ if (crm_is_true(value)) {
+ pe__set_resource_flags(*rsc, pe_rsc_notify);
+ }
+
+ if (xml_contains_remote_node((*rsc)->xml)) {
+ (*rsc)->is_remote_node = TRUE;
+ if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
+ guest_node = true;
+ } else {
+ remote_node = true;
+ }
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
+ if (crm_is_true(value)) {
+ pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ } else if ((value == NULL) && remote_node) {
+ /* By default, we want remote nodes to be able
+ * to float around the cluster without having to stop all the
+ * resources within the remote-node before moving. Allowing
+ * migration support enables this feature. If this ever causes
+ * problems, migration support can be explicitly turned off with
+ * allow-migrate=false.
+ */
+ pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
+ if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
+ if (crm_is_true(value)) {
+ pe__set_resource_flags(*rsc, pe_rsc_managed);
+ } else {
+ pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ }
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
+ if (crm_is_true(value)) {
+ pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ }
+ if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ }
+
+ if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
+ if (crm_is_true(value)) {
+ pe__set_resource_flags(*rsc, pe_rsc_unique);
+ }
+ if (detect_promotable(*rsc)) {
+ pe__set_resource_flags(*rsc, pe_rsc_promotable);
+ }
+ } else {
+ pe__set_resource_flags(*rsc, pe_rsc_unique);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
+ if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
+ (*rsc)->restart_type = pe_restart_restart;
+ pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
+ (*rsc)->id);
+ pe_warn_once(pe_wo_restart_type,
+ "Support for restart-type is deprecated and will be removed in a future release");
+
+ } else {
+ (*rsc)->restart_type = pe_restart_ignore;
+ pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
+ (*rsc)->id);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
+ if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
+ (*rsc)->recovery_type = recovery_stop_only;
+ pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
+ (*rsc)->id);
+
+ } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
+ (*rsc)->recovery_type = recovery_block;
+ pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
+ (*rsc)->id);
+
+ } else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
+ (*rsc)->recovery_type = recovery_stop_unexpected;
+ pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
+ "stop unexpected instances",
+ (*rsc)->id);
+
+ } else { // "stop_start"
+ if (!pcmk__str_eq(value, "stop_start",
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
+ ", using default of \"stop_start\"", value);
+ }
+ (*rsc)->recovery_type = recovery_stop_start;
+ pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
+ "stop/start", (*rsc)->id);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
+ if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
+ (*rsc)->stickiness = char2score(value);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
+ if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
+ (*rsc)->migration_threshold = char2score(value);
+ if ((*rsc)->migration_threshold < 0) {
+ /* @TODO We use 1 here to preserve previous behavior, but this
+ * should probably use the default (INFINITY) or 0 (to disable)
+ * instead.
+ */
+ pe_warn_once(pe_wo_neg_threshold,
+ XML_RSC_ATTR_FAIL_STICKINESS
+ " must be non-negative, using 1 instead");
+ (*rsc)->migration_threshold = 1;
+ }
+ }
+
+ if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
+ PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
+ pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
+ pe__set_resource_flags(*rsc, pe_rsc_fence_device);
+ }
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
+ unpack_requires(*rsc, value, false);
+
+ value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
+ if (value != NULL) {
+ // Stored as seconds
+ (*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
+ }
+
+ if (remote_node) {
+ GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
+
+ /* Grabbing the value now means that any rules based on node attributes
+ * will evaluate to false, so such rules should not be used with
+ * reconnect_interval.
+ *
+ * @TODO Evaluate per node before using
+ */
+ value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
+ if (value) {
+            /* Reconnect delay works by setting failure_timeout and preventing
+             * the connection from starting until the failure is cleared. */
+            (*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
+            /* Override any default failure_timeout in use when a remote
+             * reconnect_interval is in use. */
+            (*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
+ }
+ }
+
+ get_target_role(*rsc, &((*rsc)->next_role));
+ pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
+ (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
+
+ if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
+ (*rsc)->fns->free(*rsc);
+ *rsc = NULL;
+ return pcmk_rc_unpack_error;
+ }
+
+ if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ // This tag must stay exactly the same because it is tested elsewhere
+ resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
+ } else if (guest_node) {
+        /* Remote connection resources tied to a container must always be
+         * allowed to opt in to the cluster. Whether the connection resource
+         * can actually be placed on a node depends on the container
+         * resource. */
+ resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
+ }
+
+ pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
+ pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
+
+ (*rsc)->utilization = pcmk__strkey_table(free, free);
+
+ pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
+ (*rsc)->utilization, NULL, FALSE, data_set);
+
+ if (expanded_xml) {
+ if (add_template_rsc(xml_obj, data_set) == FALSE) {
+ (*rsc)->fns->free(*rsc);
+ *rsc = NULL;
+ return pcmk_rc_unpack_error;
+ }
+ }
+ return pcmk_rc_ok;
+}
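+
+/* Illustrative sketch (editorial addition): a minimal hypothetical caller.
+ * On success the resource must be freed through its variant-specific method
+ * rather than plain free():
+ *
+ *     pe_resource_t *rsc = NULL;
+ *
+ *     if (pe__unpack_resource(xml_obj, &rsc, NULL, data_set) == pcmk_rc_ok) {
+ *         // ... use rsc ...
+ *         rsc->fns->free(rsc);
+ *     }
+ */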
+
+gboolean
+is_parent(pe_resource_t *child, pe_resource_t *rsc)
+{
+ pe_resource_t *parent = child;
+
+ if (parent == NULL || rsc == NULL) {
+ return FALSE;
+ }
+ while (parent->parent != NULL) {
+ if (parent->parent == rsc) {
+ return TRUE;
+ }
+ parent = parent->parent;
+ }
+ return FALSE;
+}
+
+pe_resource_t *
+uber_parent(pe_resource_t * rsc)
+{
+ pe_resource_t *parent = rsc;
+
+ if (parent == NULL) {
+ return NULL;
+ }
+ while (parent->parent != NULL && parent->parent->variant != pe_container) {
+ parent = parent->parent;
+ }
+ return parent;
+}
+
+/*!
+ * \internal
+ * \brief Get the topmost parent of a resource as a const pointer
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] include_bundle If true, go all the way to bundle
+ *
+ * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
+ * the bundle if \p rsc is bundled and \p include_bundle is true,
+ * otherwise the topmost parent of \p rsc up to a clone
+ */
+const pe_resource_t *
+pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
+{
+ const pe_resource_t *parent = rsc;
+
+ if (parent == NULL) {
+ return NULL;
+ }
+ while (parent->parent != NULL) {
+ if (!include_bundle && (parent->parent->variant == pe_container)) {
+ break;
+ }
+ parent = parent->parent;
+ }
+ return parent;
+}
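+
+/* Illustrative sketch (editorial addition): for a primitive inside a clone
+ * that is itself inside a bundle, the two traversals differ only in whether
+ * the bundle is reachable:
+ *
+ *     uber_parent(prim)                    -> the clone
+ *     pe__const_top_resource(prim, false)  -> the clone
+ *     pe__const_top_resource(prim, true)   -> the bundle
+ */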
+
+void
+common_free(pe_resource_t * rsc)
+{
+ if (rsc == NULL) {
+ return;
+ }
+
+ pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
+
+ g_list_free(rsc->rsc_cons);
+ g_list_free(rsc->rsc_cons_lhs);
+ g_list_free(rsc->rsc_tickets);
+ g_list_free(rsc->dangling_migrations);
+
+ if (rsc->parameter_cache != NULL) {
+ g_hash_table_destroy(rsc->parameter_cache);
+ }
+ if (rsc->meta != NULL) {
+ g_hash_table_destroy(rsc->meta);
+ }
+ if (rsc->utilization != NULL) {
+ g_hash_table_destroy(rsc->utilization);
+ }
+
+ if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ free_xml(rsc->xml);
+ rsc->xml = NULL;
+ free_xml(rsc->orig_xml);
+ rsc->orig_xml = NULL;
+
+ /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
+ } else if (rsc->orig_xml) {
+ free_xml(rsc->xml);
+ rsc->xml = NULL;
+ }
+ if (rsc->running_on) {
+ g_list_free(rsc->running_on);
+ rsc->running_on = NULL;
+ }
+ if (rsc->known_on) {
+ g_hash_table_destroy(rsc->known_on);
+ rsc->known_on = NULL;
+ }
+ if (rsc->actions) {
+ g_list_free(rsc->actions);
+ rsc->actions = NULL;
+ }
+ if (rsc->allowed_nodes) {
+ g_hash_table_destroy(rsc->allowed_nodes);
+ rsc->allowed_nodes = NULL;
+ }
+ g_list_free(rsc->fillers);
+ g_list_free(rsc->rsc_location);
+ pe_rsc_trace(rsc, "Resource freed");
+ free(rsc->id);
+ free(rsc->clone_name);
+ free(rsc->allocated_to);
+ free(rsc->variant_opaque);
+ free(rsc->pending_task);
+ free(rsc);
+}
+
+/*!
+ * \internal
+ * \brief Count a node and update most preferred to it as appropriate
+ *
+ * \param[in] rsc An active resource
+ * \param[in] node A node that \p rsc is active on
+ * \param[in,out] active This will be set to \p node if \p node is more
+ * preferred than the current value
+ * \param[in,out] count_all If not NULL, this will be incremented
+ * \param[in,out] count_clean If not NULL, this will be incremented if \p node
+ * is online and clean
+ *
+ * \return true if the count should continue, or false if sufficiently known
+ */
+bool
+pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
+ pe_node_t **active, unsigned int *count_all,
+ unsigned int *count_clean)
+{
+ bool keep_looking = false;
+ bool is_happy = false;
+
+ CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
+ return false);
+
+ is_happy = node->details->online && !node->details->unclean;
+
+ if (count_all != NULL) {
+ ++*count_all;
+ }
+ if ((count_clean != NULL) && is_happy) {
+ ++*count_clean;
+ }
+ if ((count_all != NULL) || (count_clean != NULL)) {
+ keep_looking = true; // We're counting, so go through entire list
+ }
+
+ if (rsc->partial_migration_source != NULL) {
+ if (node->details == rsc->partial_migration_source->details) {
+ *active = node; // This is the migration source
+ } else {
+ keep_looking = true;
+ }
+ } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ if (is_happy && ((*active == NULL) || !(*active)->details->online
+ || (*active)->details->unclean)) {
+ *active = node; // This is the first clean node
+ } else {
+ keep_looking = true;
+ }
+ }
+ if (*active == NULL) {
+ *active = node; // This is the first node checked
+ }
+ return keep_looking;
+}
+
+// Shared implementation of resource_object_functions_t:active_node()
+static pe_node_t *
+active_node(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean)
+{
+ pe_node_t *active = NULL;
+
+ if (count_all != NULL) {
+ *count_all = 0;
+ }
+ if (count_clean != NULL) {
+ *count_clean = 0;
+ }
+ if (rsc == NULL) {
+ return NULL;
+ }
+ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
+ count_all, count_clean)) {
+ break; // Don't waste time iterating if we don't have to
+ }
+ }
+ return active;
+}
+
+/*!
+ * \internal
+ * \brief Find and count active nodes according to "requires"
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] count If not NULL, will be set to count of active nodes
+ *
+ * \return An active node (or NULL if resource is not active anywhere)
+ *
+ * \note This is a convenience wrapper for active_node() where the count of all
+ * active nodes or only clean active nodes is desired according to the
+ * "requires" meta-attribute.
+ */
+pe_node_t *
+pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
+{
+ if (rsc == NULL) {
+ if (count != NULL) {
+ *count = 0;
+ }
+ return NULL;
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ return rsc->fns->active_node(rsc, count, NULL);
+
+ } else {
+ return rsc->fns->active_node(rsc, NULL, count);
+ }
+}
+
+void
+pe__count_common(pe_resource_t *rsc)
+{
+ if (rsc->children != NULL) {
+ for (GList *item = rsc->children; item != NULL; item = item->next) {
+ ((pe_resource_t *) item->data)->fns->count(item->data);
+ }
+
+ } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
+ || (rsc->role > RSC_ROLE_STOPPED)) {
+ rsc->cluster->ninstances++;
+ if (pe__resource_is_disabled(rsc)) {
+ rsc->cluster->disabled_resources++;
+ }
+ if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ rsc->cluster->blocked_resources++;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Update a resource's next role
+ *
+ * \param[in,out] rsc Resource to be updated
+ * \param[in] role Resource's new next role
+ * \param[in] why Human-friendly reason why role is changing (for logs)
+ */
+void
+pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
+{
+ CRM_ASSERT((rsc != NULL) && (why != NULL));
+ if (rsc->next_role != role) {
+ pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
+ rsc->id, role2text(rsc->next_role), role2text(role), why);
+ rsc->next_role = role;
+ }
+}
diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c
new file mode 100644
index 0000000..a4a3e11
--- /dev/null
+++ b/lib/pengine/failcounts.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2008-2023 the Pacemaker project contributors
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <sys/types.h>
+#include <regex.h>
+#include <glib.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/util.h>
+#include <crm/pengine/internal.h>
+
+static gboolean
+is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml,
+ const xmlNode *lrm_op_xml)
+{
+ gboolean matched = FALSE;
+ const char *conf_op_name = NULL;
+ const char *lrm_op_task = NULL;
+ const char *conf_op_interval_spec = NULL;
+ guint conf_op_interval_ms = 0;
+ guint lrm_op_interval_ms = 0;
+ const char *lrm_op_id = NULL;
+ char *last_failure_key = NULL;
+
+ if (rsc_id == NULL || conf_op_xml == NULL || lrm_op_xml == NULL) {
+ return FALSE;
+ }
+
+ // Get name and interval from configured op
+ conf_op_name = crm_element_value(conf_op_xml, "name");
+ conf_op_interval_spec = crm_element_value(conf_op_xml,
+ XML_LRM_ATTR_INTERVAL);
+ conf_op_interval_ms = crm_parse_interval_spec(conf_op_interval_spec);
+
+ // Get name and interval from op history entry
+ lrm_op_task = crm_element_value(lrm_op_xml, XML_LRM_ATTR_TASK);
+ crm_element_value_ms(lrm_op_xml, XML_LRM_ATTR_INTERVAL_MS,
+ &lrm_op_interval_ms);
+
+ if ((conf_op_interval_ms != lrm_op_interval_ms)
+ || !pcmk__str_eq(conf_op_name, lrm_op_task, pcmk__str_casei)) {
+ return FALSE;
+ }
+
+ lrm_op_id = ID(lrm_op_xml);
+ last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0);
+
+ if (pcmk__str_eq(last_failure_key, lrm_op_id, pcmk__str_casei)) {
+ matched = TRUE;
+
+ } else {
+ char *expected_op_key = pcmk__op_key(rsc_id, conf_op_name,
+ conf_op_interval_ms);
+
+ if (pcmk__str_eq(expected_op_key, lrm_op_id, pcmk__str_casei)) {
+ int rc = 0;
+ int target_rc = pe__target_rc_from_xml(lrm_op_xml);
+
+ crm_element_value_int(lrm_op_xml, XML_LRM_ATTR_RC, &rc);
+ if (rc != target_rc) {
+ matched = TRUE;
+ }
+ }
+ free(expected_op_key);
+ }
+
+ free(last_failure_key);
+ return matched;
+}
+
+static gboolean
+block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
+{
+ char *xml_name = clone_strip(rsc->id);
+
+ /* @TODO This xpath search occurs after template expansion, but it is unable
+ * to properly detect on-fail in id-ref, operation meta-attributes, or
+ * op_defaults, or evaluate rules.
+ *
+ * Also, on-fail defaults to block (in unpack_operation()) for stop actions
+ * when stonith is disabled.
+ *
+ * Ideally, we'd unpack the operation before this point, and pass in a
+ * meta-attributes table that takes all that into consideration.
+ */
+ char *xpath = crm_strdup_printf("//" XML_CIB_TAG_RESOURCE
+ "[@" XML_ATTR_ID "='%s']"
+ "//" XML_ATTR_OP
+ "[@" XML_OP_ATTR_ON_FAIL "='block']",
+ xml_name);
+
+ xmlXPathObject *xpathObj = xpath_search(rsc->xml, xpath);
+ gboolean should_block = FALSE;
+
+ free(xpath);
+
+ if (xpathObj) {
+ int max = numXpathResults(xpathObj);
+ int lpc = 0;
+
+ for (lpc = 0; lpc < max; lpc++) {
+ xmlNode *pref = getXpathResult(xpathObj, lpc);
+
+ if (xml_op) {
+ should_block = is_matched_failure(xml_name, pref, xml_op);
+ if (should_block) {
+ break;
+ }
+
+ } else {
+ const char *conf_op_name = NULL;
+ const char *conf_op_interval_spec = NULL;
+ guint conf_op_interval_ms = 0;
+ char *lrm_op_xpath = NULL;
+ xmlXPathObject *lrm_op_xpathObj = NULL;
+
+ // Get name and interval from configured op
+ conf_op_name = crm_element_value(pref, "name");
+ conf_op_interval_spec = crm_element_value(pref, XML_LRM_ATTR_INTERVAL);
+ conf_op_interval_ms = crm_parse_interval_spec(conf_op_interval_spec);
+
+#define XPATH_FMT "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
+ "//" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \
+ "/" XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_TASK "='%s']" \
+ "[@" XML_LRM_ATTR_INTERVAL "='%u']"
+
+ lrm_op_xpath = crm_strdup_printf(XPATH_FMT,
+ node->details->uname, xml_name,
+ conf_op_name,
+ conf_op_interval_ms);
+ lrm_op_xpathObj = xpath_search(rsc->cluster->input, lrm_op_xpath);
+
+ free(lrm_op_xpath);
+
+ if (lrm_op_xpathObj) {
+ int max2 = numXpathResults(lrm_op_xpathObj);
+ int lpc2 = 0;
+
+ for (lpc2 = 0; lpc2 < max2; lpc2++) {
+ xmlNode *lrm_op_xml = getXpathResult(lrm_op_xpathObj,
+ lpc2);
+
+ should_block = is_matched_failure(xml_name, pref,
+ lrm_op_xml);
+ if (should_block) {
+ break;
+ }
+ }
+ }
+ freeXpathObject(lrm_op_xpathObj);
+
+ if (should_block) {
+ break;
+ }
+ }
+ }
+ }
+
+ free(xml_name);
+ freeXpathObject(xpathObj);
+
+ return should_block;
+}
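+
+/* Illustrative sketch (editorial addition): the first XPath above matches
+ * hypothetical configuration such as
+ *
+ *     <primitive id="myrsc" ...>
+ *       <operations>
+ *         <op id="myrsc-stop" name="stop" interval="0" on-fail="block"/>
+ *       </operations>
+ *     </primitive>
+ *
+ * subject to the limitations noted in the @TODO comment (id-ref, op_defaults,
+ * and rules are not considered).
+ */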
+
+/*!
+ * \internal
+ * \brief Get resource name as used in failure-related node attributes
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Newly allocated string containing resource's fail name
+ * \note The caller is responsible for freeing the result.
+ */
+static inline char *
+rsc_fail_name(const pe_resource_t *rsc)
+{
+ const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
+
+ return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
+}
+
+/*!
+ * \internal
+ * \brief Compile regular expression to match a failure-related node attribute
+ *
+ * \param[in] prefix Attribute prefix to match
+ * \param[in] rsc_name Resource name to match as used in failure attributes
+ * \param[in] is_legacy Whether DC uses per-resource fail counts
+ * \param[in] is_unique Whether the resource is a globally unique clone
+ * \param[out] re Where to store resulting regular expression
+ *
+ * \return Standard Pacemaker return code
+ * \note Fail attributes are named like PREFIX-RESOURCE#OP_INTERVAL.
+ * The caller is responsible for freeing re with regfree().
+ */
+static int
+generate_fail_regex(const char *prefix, const char *rsc_name,
+ gboolean is_legacy, gboolean is_unique, regex_t *re)
+{
+ char *pattern;
+
+ /* @COMPAT DC < 1.1.17: Fail counts used to be per-resource rather than
+ * per-operation.
+ */
+ const char *op_pattern = (is_legacy? "" : "#.+_[0-9]+");
+
+ /* Ignore instance numbers for anything other than globally unique clones.
+ * Anonymous clone fail counts could contain an instance number if the
+ * clone was initially unique, failed, then was converted to anonymous.
+ * @COMPAT Also, before 1.1.8, anonymous clone fail counts always contained
+ * clone instance numbers.
+ */
+ const char *instance_pattern = (is_unique? "" : "(:[0-9]+)?");
+
+ pattern = crm_strdup_printf("^%s-%s%s%s$", prefix, rsc_name,
+ instance_pattern, op_pattern);
+ if (regcomp(re, pattern, REG_EXTENDED|REG_NOSUB) != 0) {
+ free(pattern);
+ return EINVAL;
+ }
+
+ free(pattern);
+ return pcmk_rc_ok;
+}
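+
+/* Illustrative sketch (editorial addition): for a hypothetical rsc_name of
+ * "myrsc" with a modern DC (is_legacy false) and a non-unique resource, and
+ * assuming the prefix expands to "fail-count", the compiled expression
+ * behaves like this against node attribute names:
+ *
+ *     fail-count-myrsc#monitor_10000     matches
+ *     fail-count-myrsc:0#monitor_10000   matches (old instance suffix)
+ *     fail-count-other#monitor_10000     does not match
+ */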
+
+/*!
+ * \internal
+ * \brief Compile regular expressions to match failure-related node attributes
+ *
+ * \param[in] rsc Resource being checked for failures
+ * \param[in] data_set Data set (for CRM feature set version)
+ * \param[out] failcount_re Storage for regular expression for fail count
+ * \param[out] lastfailure_re Storage for regular expression for last failure
+ *
+ * \return Standard Pacemaker return code
+ * \note On success, the caller is responsible for freeing the expressions with
+ * regfree().
+ */
+static int
+generate_fail_regexes(const pe_resource_t *rsc,
+ const pe_working_set_t *data_set,
+ regex_t *failcount_re, regex_t *lastfailure_re)
+{
+ char *rsc_name = rsc_fail_name(rsc);
+ const char *version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ gboolean is_legacy = (compare_version(version, "3.0.13") < 0);
+ int rc = pcmk_rc_ok;
+
+ if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, is_legacy,
+ pcmk_is_set(rsc->flags, pe_rsc_unique),
+ failcount_re) != pcmk_rc_ok) {
+ rc = EINVAL;
+
+ } else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name,
+ is_legacy,
+ pcmk_is_set(rsc->flags, pe_rsc_unique),
+ lastfailure_re) != pcmk_rc_ok) {
+ rc = EINVAL;
+ regfree(failcount_re);
+ }
+
+ free(rsc_name);
+ return rc;
+}
+
+int
+pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
+ time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+{
+ char *key = NULL;
+ const char *value = NULL;
+ regex_t failcount_re, lastfailure_re;
+ int failcount = 0;
+ time_t last = 0;
+ GHashTableIter iter;
+
+ CRM_CHECK(generate_fail_regexes(rsc, rsc->cluster, &failcount_re,
+ &lastfailure_re) == pcmk_rc_ok,
+ return 0);
+
+ /* Resource fail count is sum of all matching operation fail counts */
+ g_hash_table_iter_init(&iter, node->details->attrs);
+ while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
+ if (regexec(&failcount_re, key, 0, NULL, 0) == 0) {
+ failcount = pcmk__add_scores(failcount, char2score(value));
+ crm_trace("Added %s (%s) to %s fail count (now %s)",
+ key, value, rsc->id, pcmk_readable_score(failcount));
+ } else if (regexec(&lastfailure_re, key, 0, NULL, 0) == 0) {
+ long long last_ll;
+
+ if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
+ last = (time_t) QB_MAX(last, last_ll);
+ }
+ }
+ }
+
+ regfree(&failcount_re);
+ regfree(&lastfailure_re);
+
+ if ((failcount > 0) && (last > 0) && (last_failure != NULL)) {
+ *last_failure = last;
+ }
+
+ /* If failure blocks the resource, disregard any failure timeout */
+ if ((failcount > 0) && rsc->failure_timeout
+ && block_failure(node, rsc, xml_op)) {
+
+ pe_warn("Ignoring failure timeout %d for %s because it conflicts with on-fail=block",
+ rsc->failure_timeout, rsc->id);
+ rsc->failure_timeout = 0;
+ }
+
+ /* If all failures have expired, ignore fail count */
+ if (pcmk_is_set(flags, pe_fc_effective) && (failcount > 0) && (last > 0)
+ && rsc->failure_timeout) {
+
+ time_t now = get_effective_time(rsc->cluster);
+
+ if (now > (last + rsc->failure_timeout)) {
+ crm_debug("Failcount for %s on %s expired after %ds",
+ rsc->id, pe__node_name(node), rsc->failure_timeout);
+ failcount = 0;
+ }
+ }
+
+ /* We never want the fail counts of a bundle container's fillers to
+ * count towards the container's fail count.
+ *
+ * Most importantly, a Pacemaker Remote connection to a bundle container
+ * is a filler of the container, but can reside on a different node than the
+ * container itself. Counting its fail count on its node towards the
+ * container's fail count on that node could lead to attempting to stop the
+ * container on the wrong node.
+ */
+
+ if (pcmk_is_set(flags, pe_fc_fillers) && rsc->fillers
+ && !pe_rsc_is_bundled(rsc)) {
+
+ GList *gIter = NULL;
+
+ for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *filler = (pe_resource_t *) gIter->data;
+ time_t filler_last_failure = 0;
+
+ failcount += pe_get_failcount(node, filler, &filler_last_failure,
+ flags, xml_op);
+
+ if (last_failure && filler_last_failure > *last_failure) {
+ *last_failure = filler_last_failure;
+ }
+ }
+
+ if (failcount > 0) {
+ crm_info("Container %s and the resources within it "
+ "have failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(failcount),
+ pcmk__plural_s(failcount), pe__node_name(node));
+ }
+
+ } else if (failcount > 0) {
+ crm_info("%s has failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(failcount),
+ pcmk__plural_s(failcount), pe__node_name(node));
+ }
+
+ return failcount;
+}
+
+/*!
+ * \brief Schedule a controller operation to clear a fail count
+ *
+ * \param[in,out] rsc Resource with failure
+ * \param[in] node Node failure occurred on
+ * \param[in] reason Readable description why needed (for logging)
+ * \param[in,out] data_set Working set for cluster
+ *
+ * \return Scheduled action
+ */
+pe_action_t *
+pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
+ const char *reason, pe_working_set_t *data_set)
+{
+ char *key = NULL;
+ pe_action_t *clear = NULL;
+
+ CRM_CHECK(rsc && node && reason && data_set, return NULL);
+
+ key = pcmk__op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
+ clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE,
+ data_set);
+ add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
+ crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
+ rsc->id, pe__node_name(node), reason, clear->uuid);
+ return clear;
+}
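+
+/* Illustrative call (a sketch; "rsc", "node", and "data_set" are values the
+ * caller already has):
+ *
+ *     pe_action_t *clear_op = pe__clear_failcount(rsc, node,
+ *                                                 "failure expired",
+ *                                                 data_set);
+ *
+ * custom_action() registers the new action with the working set, so callers
+ * typically use the return value only to order other actions around it.
+ */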
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
new file mode 100644
index 0000000..d54b01a
--- /dev/null
+++ b/lib/pengine/group.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdint.h>
+
+#include <crm/pengine/rules.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
+#include <crm/msg_xml.h>
+#include <crm/common/output.h>
+#include <crm/common/strings_internal.h>
+#include <crm/common/xml_internal.h>
+#include <pe_status_private.h>
+
+typedef struct group_variant_data_s {
+ pe_resource_t *last_child; // Last group member
+ uint32_t flags; // Group of enum pe__group_flags
+} group_variant_data_t;
+
+/*!
+ * \internal
+ * \brief Get a group's last member
+ *
+ * \param[in] group Group resource to check
+ *
+ * \return Last member of \p group if any, otherwise NULL
+ */
+pe_resource_t *
+pe__last_group_member(const pe_resource_t *group)
+{
+ if (group != NULL) {
+ CRM_CHECK((group->variant == pe_group)
+ && (group->variant_opaque != NULL), return NULL);
+ return ((group_variant_data_t *) group->variant_opaque)->last_child;
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a group flag is set
+ *
+ * \param[in] group Group resource to check
+ * \param[in] flags Flag or flags to check
+ *
+ * \return true if all \p flags are set for \p group, otherwise false
+ */
+bool
+pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
+{
+ group_variant_data_t *group_data = NULL;
+
+ CRM_CHECK((group != NULL) && (group->variant == pe_group)
+ && (group->variant_opaque != NULL), return false);
+ group_data = (group_variant_data_t *) group->variant_opaque;
+ return pcmk_all_flags_set(group_data->flags, flags);
+}
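+
+/* Example check (sketch; "group" is any resource of variant pe_group):
+ *
+ *     if (pe__group_flag_is_set(group, pe__group_ordered
+ *                                      |pe__group_colocated)) {
+ *         // members start in sequence and are kept on the same node
+ *     }
+ *
+ * Both flags are set by default; see set_group_flag() below.
+ */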
+
+/*!
+ * \internal
+ * \brief Set a (deprecated) group flag
+ *
+ * \param[in,out] group Group resource whose flag should be set
+ * \param[in] option Name of boolean configuration option
+ * \param[in] flag Flag to set if \p option is true (which is the default)
+ * \param[in] wo_bit "Warn once" flag to use for deprecation warning
+ */
+static void
+set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
+ uint32_t wo_bit)
+{
+ const char *value_s = NULL;
+ int value = 0;
+
+ value_s = g_hash_table_lookup(group->meta, option);
+
+ // We don't actually need the null check but it speeds up the common case
+ if ((value_s == NULL) || (crm_str_to_boolean(value_s, &value) < 0)
+ || (value != 0)) {
+
+ ((group_variant_data_t *) group->variant_opaque)->flags |= flag;
+
+ } else {
+ pe_warn_once(wo_bit,
+ "Support for the '%s' group meta-attribute is deprecated "
+ "and will be removed in a future release "
+ "(use a resource set instead)", option);
+ }
+}
+
+static int
+inactive_resources(pe_resource_t *rsc)
+{
+ int retval = 0;
+
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (!child_rsc->fns->active(child_rsc, TRUE)) {
+ retval++;
+ }
+ }
+
+ return retval;
+}
+
+static void
+group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+ int n_inactive, bool show_inactive, const char *desc)
+{
+ GString *attrs = NULL;
+
+ if (n_inactive > 0 && !show_inactive) {
+ attrs = g_string_sized_new(64);
+ g_string_append_printf(attrs, "%d member%s inactive", n_inactive,
+ pcmk__plural_s(n_inactive));
+ }
+
+ if (pe__resource_is_disabled(rsc)) {
+ pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
+
+ } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
+ }
+
+ if (attrs != NULL) {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)%s%s%s",
+ rsc->id,
+ (const char *) attrs->str, desc ? " (" : "",
+ desc ? desc : "", desc ? ")" : "");
+ g_string_free(attrs, TRUE);
+ } else {
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s%s%s%s",
+ rsc->id,
+ desc ? " (" : "", desc ? desc : "",
+ desc ? ")" : "");
+ }
+}
+
+static bool
+skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
+ GList *only_rsc, uint32_t show_opts)
+{
+ bool star_list = pcmk__list_of_1(only_rsc) &&
+ pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
+ bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE);
+ bool child_active = child->fns->active(child, FALSE);
+ bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs);
+
+    /* If the resource is in only_rsc by name (that is, not merely via "*"),
+     * allow it regardless of whether it is active.
+     */
+ if (!star_list && !child_filtered) {
+ return false;
+
+ } else if (!child_filtered && (child_active || show_inactive)) {
+ return false;
+
+ } else if (parent_passes && (child_active || show_inactive)) {
+ return false;
+
+ }
+
+ return true;
+}
+
+gboolean
+group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+{
+ xmlNode *xml_obj = rsc->xml;
+ xmlNode *xml_native_rsc = NULL;
+ group_variant_data_t *group_data = NULL;
+ const char *clone_id = NULL;
+
+ pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
+
+ group_data = calloc(1, sizeof(group_variant_data_t));
+ group_data->last_child = NULL;
+ rsc->variant_opaque = group_data;
+
+ // @COMPAT These are deprecated since 2.1.5
+ set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
+ pe_wo_group_order);
+ set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
+
+ clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
+
+ for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
+ xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
+
+ if (pcmk__str_eq((const char *)xml_native_rsc->name,
+ XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
+ pe_resource_t *new_rsc = NULL;
+
+ crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
+ if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
+ data_set) != pcmk_rc_ok) {
+ continue;
+ }
+
+ rsc->children = g_list_append(rsc->children, new_rsc);
+ group_data->last_child = new_rsc;
+ pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
+ }
+ }
+
+ if (rsc->children == NULL) {
+ /* The schema does not allow empty groups, but if validation is
+ * disabled, we allow them (members can be added later).
+ *
+ * @COMPAT At a major release bump, we should consider this a failure so
+ * that group methods can assume children is not NULL, and there
+ * are no strange effects from phantom groups due to their
+ * presence or meta-attributes.
+ */
+ pcmk__config_warn("Group %s will be ignored because it does not have "
+ "any members", rsc->id);
+ }
+ return TRUE;
+}
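+
+/* The deprecated meta-attributes handled above appear in the CIB like this
+ * (hypothetical configuration fragment; all IDs are made up):
+ *
+ *     <group id="grp">
+ *       <meta_attributes id="grp-meta">
+ *         <nvpair id="grp-ordered" name="ordered" value="false"/>
+ *       </meta_attributes>
+ *       <primitive id="grp-a" ... />
+ *       <primitive id="grp-b" ... />
+ *     </group>
+ *
+ * Setting "ordered" or "collocated" to false triggers the deprecation
+ * warning in set_group_flag(); ordered/colocated resource sets are the
+ * supported replacement.
+ */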
+
+gboolean
+group_active(pe_resource_t * rsc, gboolean all)
+{
+ gboolean c_all = TRUE;
+ gboolean c_any = FALSE;
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (child_rsc->fns->active(child_rsc, all)) {
+ c_any = TRUE;
+ } else {
+ c_all = FALSE;
+ }
+ }
+
+ if (c_any == FALSE) {
+ return FALSE;
+ } else if (all && c_all == FALSE) {
+ return FALSE;
+ }
+ return TRUE;
+}
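+
+/* In other words (summary of the loop above): group_active(rsc, TRUE) is
+ * true only if every member is active, while group_active(rsc, FALSE) is
+ * true if at least one member is.
+ */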
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ GList *gIter = rsc->children;
+ char *child_text = crm_strdup_printf("%s ", pre_text);
+
+ status_print("%s<group " XML_ATTR_ID "=\"%s\" ", pre_text, rsc->id);
+ status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
+ status_print(">\n");
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ child_rsc->fns->print(child_rsc, child_text, options, print_data);
+ }
+
+ status_print("%s</group>\n", pre_text);
+ free(child_text);
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+group_print(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ char *child_text = NULL;
+ GList *gIter = rsc->children;
+
+ if (pre_text == NULL) {
+ pre_text = " ";
+ }
+
+ if (options & pe_print_xml) {
+ group_print_xml(rsc, pre_text, options, print_data);
+ return;
+ }
+
+ child_text = crm_strdup_printf("%s ", pre_text);
+
+ status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
+
+ if (options & pe_print_html) {
+ status_print("\n<ul>\n");
+
+ } else if ((options & pe_print_log) == 0) {
+ status_print("\n");
+ }
+
+ if (options & pe_print_brief) {
+ print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
+
+ } else {
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (options & pe_print_html) {
+ status_print("<li>\n");
+ }
+ child_rsc->fns->print(child_rsc, child_text, options, print_data);
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+ }
+
+ if (options & pe_print_html) {
+ status_print("</ul>\n");
+ }
+ free(child_text);
+}
+
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__group_xml(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const char *desc = NULL;
+ GList *gIter = rsc->children;
+
+ int rc = pcmk_rc_no_output;
+
+ gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
+ continue;
+ }
+
+ if (rc == pcmk_rc_no_output) {
+ char *count = pcmk__itoa(g_list_length(gIter));
+ const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
+ const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
+ const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
+
+ rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
+ XML_ATTR_ID, rsc->id,
+ "number_resources", count,
+ "maintenance", maint_s,
+ "managed", managed_s,
+ "disabled", disabled_s,
+ "description", desc);
+ free(count);
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+
+ out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc,
+ only_node, only_rsc);
+ }
+
+ if (rc == pcmk_rc_ok) {
+ pcmk__output_xml_pop_parent(out);
+ }
+
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__group_default(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const char *desc = NULL;
+ int rc = pcmk_rc_no_output;
+
+ gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
+
+ gboolean active = rsc->fns->active(rsc, TRUE);
+ gboolean partially_active = rsc->fns->active(rsc, FALSE);
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_brief)) {
+ GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
+
+ if (rscs != NULL) {
+ group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+ pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
+ pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs);
+
+ rc = pcmk_rc_ok;
+ g_list_free(rscs);
+ }
+
+ } else {
+ for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
+ continue;
+ }
+
+ group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
+ pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
+ out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
+ child_rsc, only_node, only_rsc);
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+
+ return rc;
+}
+
+void
+group_free(pe_resource_t * rsc)
+{
+ CRM_CHECK(rsc != NULL, return);
+
+ pe_rsc_trace(rsc, "Freeing %s", rsc->id);
+
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ CRM_ASSERT(child_rsc);
+ pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
+ child_rsc->fns->free(child_rsc);
+ }
+
+ pe_rsc_trace(rsc, "Freeing child list");
+ g_list_free(rsc->children);
+
+ common_free(rsc);
+}
+
+enum rsc_role_e
+group_resource_state(const pe_resource_t * rsc, gboolean current)
+{
+ enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
+
+ if (role > group_role) {
+ group_role = role;
+ }
+ }
+
+ pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
+ return group_role;
+}
+
+gboolean
+pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+ gboolean check_parent)
+{
+ gboolean passes = FALSE;
+
+ if (check_parent
+ && pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc,
+ false)),
+ only_rsc, pcmk__str_star_matches)) {
+ passes = TRUE;
+ } else if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
+ passes = TRUE;
+ } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
+ passes = TRUE;
+ } else {
+ for (const GList *iter = rsc->children;
+ iter != NULL; iter = iter->next) {
+
+ const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
+
+ if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+
+ return !passes;
+}
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
new file mode 100644
index 0000000..5e92ddc
--- /dev/null
+++ b/lib/pengine/native.c
@@ -0,0 +1,1414 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdint.h>
+
+#include <crm/common/output.h>
+#include <crm/pengine/rules.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/complex.h>
+#include <crm/pengine/internal.h>
+#include <crm/msg_xml.h>
+#include <pe_status_private.h>
+
+#ifdef PCMK__COMPAT_2_0
+#define PROVIDER_SEP "::"
+#else
+#define PROVIDER_SEP ":"
+#endif
+
+/*!
+ * \internal
+ * \brief Check whether a resource is active on multiple nodes
+ */
+static bool
+is_multiply_active(const pe_resource_t *rsc)
+{
+ unsigned int count = 0;
+
+ if (rsc->variant == pe_native) {
+ pe__find_active_requires(rsc, &count);
+ }
+ return count > 1;
+}
+
+static void
+native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
+{
+ int priority = 0;
+
+ if ((rsc->priority == 0) || (failed == TRUE)) {
+ return;
+ }
+
+ if (rsc->role == RSC_ROLE_PROMOTED) {
+ // Promoted instance takes base priority + 1
+ priority = rsc->priority + 1;
+
+ } else {
+ priority = rsc->priority;
+ }
+
+ node->details->priority += priority;
+ pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
+ pe__node_name(node), node->details->priority,
+ (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ rsc->id, rsc->priority,
+ (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
+
+ /* Priority of a resource running on a guest node is added to the cluster
+ * node as well. */
+ if (node->details->remote_rsc
+ && node->details->remote_rsc->container) {
+ GList *gIter = node->details->remote_rsc->container->running_on;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *a_node = gIter->data;
+
+ a_node->details->priority += priority;
+ pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
+ "from guest node %s",
+ pe__node_name(a_node), a_node->details->priority,
+ (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ rsc->id, rsc->priority,
+ (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
+ pe__node_name(node));
+ }
+ }
+}
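+
+/* Worked example (sketch): a promoted instance with priority=10 adds 11 to
+ * its node's priority (base + 1), a started or unpromoted instance adds 10,
+ * and a failed instance adds nothing.
+ */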
+
+void
+native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
+{
+ GList *gIter = rsc->running_on;
+
+ CRM_CHECK(node != NULL, return);
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *a_node = (pe_node_t *) gIter->data;
+
+ CRM_CHECK(a_node != NULL, return);
+ if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
+ return;
+ }
+ }
+
+ pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
+ pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
+
+ rsc->running_on = g_list_append(rsc->running_on, node);
+ if (rsc->variant == pe_native) {
+ node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
+
+ native_priority_to_node(rsc, node, failed);
+ }
+
+ if (rsc->variant == pe_native && node->details->maintenance) {
+ pe__clear_resource_flags(rsc, pe_rsc_managed);
+ pe__set_resource_flags(rsc, pe_rsc_maintenance);
+ }
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pe_resource_t *p = rsc->parent;
+
+ pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
+ resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
+
+ while(p && node->details->online) {
+ /* add without the additional location constraint */
+ p->running_on = g_list_append(p->running_on, node);
+ p = p->parent;
+ }
+ return;
+ }
+
+ if (is_multiply_active(rsc)) {
+ switch (rsc->recovery_type) {
+ case recovery_stop_only:
+ {
+ GHashTableIter gIter;
+ pe_node_t *local_node = NULL;
+
+ /* make sure it doesn't come up again */
+ if (rsc->allowed_nodes != NULL) {
+ g_hash_table_destroy(rsc->allowed_nodes);
+ }
+ rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
+ g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
+ local_node->weight = -INFINITY;
+ }
+ }
+ break;
+ case recovery_block:
+ pe__clear_resource_flags(rsc, pe_rsc_managed);
+ pe__set_resource_flags(rsc, pe_rsc_block);
+
+ /* If the resource belongs to a group or bundle configured with
+ * multiple-active=block, block the entire entity.
+ */
+ if (rsc->parent
+ && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
+ && rsc->parent->recovery_type == recovery_block) {
+ GList *gIter = rsc->parent->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child = (pe_resource_t *) gIter->data;
+
+ pe__clear_resource_flags(child, pe_rsc_managed);
+ pe__set_resource_flags(child, pe_rsc_block);
+ }
+ }
+ break;
+ default: // recovery_stop_start, recovery_stop_unexpected
+ /* The scheduler will do the right thing because the relevant
+ * variables and flags are set when unpacking the history.
+ */
+ break;
+ }
+ crm_debug("%s is active on multiple nodes including %s: %s",
+ rsc->id, pe__node_name(node),
+ recovery2text(rsc->recovery_type));
+
+ } else {
+ pe_rsc_trace(rsc, "Resource %s is active on %s",
+ rsc->id, pe__node_name(node));
+ }
+
+ if (rsc->parent != NULL) {
+ native_add_running(rsc->parent, node, data_set, FALSE);
+ }
+}
+
+static void
+recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
+{
+ pe__clear_resource_flags(rsc, pe_rsc_unique);
+ add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
+ g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
+}
+
+gboolean
+native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+{
+ pe_resource_t *parent = uber_parent(rsc);
+ const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ uint32_t ra_caps = pcmk_get_ra_caps(standard);
+
+ pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
+
+ // Only some agent standards support unique and promotable clones
+ if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
+ && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
+
+ /* @COMPAT We should probably reject this situation as an error (as we
+ * do for promotable below) rather than warn and convert, but that would
+ * be a backward-incompatible change that we should probably do with a
+ * transform at a schema major version bump.
+ */
+ pe__force_anon(standard, parent, rsc->id, data_set);
+
+ /* Clear globally-unique on the parent and all its descendants unpacked
+ * so far (clearing the parent should make any future children unpacking
+ * correct). We have to clear this resource explicitly because it isn't
+ * hooked into the parent's children yet.
+ */
+ recursive_clear_unique(parent, NULL);
+ recursive_clear_unique(rsc, NULL);
+ }
+ if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
+ && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
+
+ pe_err("Resource %s is of type %s and therefore "
+ "cannot be used as a promotable clone resource",
+ rsc->id, standard);
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static bool
+rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
+{
+ pe_rsc_trace(rsc, "Checking whether %s is on %s",
+ rsc->id, pe__node_name(node));
+
+ if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
+
+ for (GList *iter = rsc->running_on; iter; iter = iter->next) {
+ pe_node_t *loc = (pe_node_t *) iter->data;
+
+ if (loc->details == node->details) {
+ return true;
+ }
+ }
+
+ } else if (pcmk_is_set(flags, pe_find_inactive)
+ && (rsc->running_on == NULL)) {
+ return true;
+
+ } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
+ && (rsc->allocated_to->details == node->details)) {
+ return true;
+ }
+ return false;
+}
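+
+/* Flag semantics in brief (sketch):
+ *
+ *     rsc_is_on_node(rsc, node, pe_find_current);  // node is in running_on
+ *     rsc_is_on_node(rsc, node, pe_find_inactive); // rsc runs nowhere
+ *     rsc_is_on_node(rsc, node, 0);                // allocated to this node
+ */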
+
+pe_resource_t *
+native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
+ int flags)
+{
+ bool match = false;
+ pe_resource_t *result = NULL;
+
+ CRM_CHECK(id && rsc && rsc->id, return NULL);
+
+ if (flags & pe_find_clone) {
+ const char *rid = ID(rsc->xml);
+
+ if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
+ match = false;
+
+ } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) {
+ match = true;
+ }
+
+ } else if (!strcmp(id, rsc->id)) {
+ match = true;
+
+ } else if (pcmk_is_set(flags, pe_find_renamed)
+ && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
+ match = true;
+
+ } else if (pcmk_is_set(flags, pe_find_any)
+ || (pcmk_is_set(flags, pe_find_anon)
+ && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
+ match = pe_base_name_eq(rsc, id);
+ }
+
+ if (match && on_node) {
+ if (!rsc_is_on_node(rsc, on_node, flags)) {
+ match = false;
+ }
+ }
+
+ if (match) {
+ return rsc;
+ }
+
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child = (pe_resource_t *) gIter->data;
+
+ result = rsc->fns->find_rsc(child, id, on_node, flags);
+ if (result) {
+ return result;
+ }
+ }
+ return NULL;
+}
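+
+/* Typical lookup (sketch; "myclone:0" and "node" are hypothetical, and node
+ * may be NULL to match regardless of location):
+ *
+ *     pe_resource_t *found = rsc->fns->find_rsc(rsc, "myclone:0", node,
+ *                                               pe_find_renamed
+ *                                               |pe_find_current);
+ *
+ * pe_find_renamed also matches rsc->clone_name, and a non-NULL node
+ * restricts matches to resources currently on that node.
+ */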
+
+// create is ignored
+char *
+native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
+ pe_working_set_t * data_set)
+{
+ char *value_copy = NULL;
+ const char *value = NULL;
+ GHashTable *params = NULL;
+
+ CRM_CHECK(rsc != NULL, return NULL);
+ CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
+
+ pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
+ params = pe_rsc_params(rsc, node, data_set);
+ value = g_hash_table_lookup(params, name);
+ if (value == NULL) {
+ /* try meta attributes instead */
+ value = g_hash_table_lookup(rsc->meta, name);
+ }
+ pcmk__str_update(&value_copy, value);
+ return value_copy;
+}
+
+gboolean
+native_active(pe_resource_t * rsc, gboolean all)
+{
+ for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *a_node = (pe_node_t *) gIter->data;
+
+ if (a_node->details->unclean) {
+ pe_rsc_trace(rsc, "Resource %s: %s is unclean",
+ rsc->id, pe__node_name(a_node));
+ return TRUE;
+ } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pe_rsc_trace(rsc, "Resource %s: %s is offline",
+ rsc->id, pe__node_name(a_node));
+ } else {
+ pe_rsc_trace(rsc, "Resource %s active on %s",
+ rsc->id, pe__node_name(a_node));
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+struct print_data_s {
+ long options;
+ void *print_data;
+};
+
+static const char *
+native_pending_state(const pe_resource_t *rsc)
+{
+ const char *pending_state = NULL;
+
+ if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
+ pending_state = "Starting";
+
+ } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ pending_state = "Stopping";
+
+ } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ pending_state = "Migrating";
+
+ } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
+        /* A pending migrate_from is also displayed as "Migrating" */
+ pending_state = "Migrating";
+
+ } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ pending_state = "Promoting";
+
+ } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
+ pending_state = "Demoting";
+ }
+
+ return pending_state;
+}
+
+static const char *
+native_pending_task(const pe_resource_t *rsc)
+{
+ const char *pending_task = NULL;
+
+ if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ pending_task = "Monitoring";
+
+ /* Pending probes are not printed, even if pending
+ * operations are requested. If someone ever requests that
+ * behavior, uncomment this and the corresponding part of
+ * unpack.c:unpack_rsc_op().
+ */
+ /*
+ } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
+ pending_task = "Checking";
+ */
+ }
+
+ return pending_task;
+}
+
+static enum rsc_role_e
+native_displayable_role(const pe_resource_t *rsc)
+{
+ enum rsc_role_e role = rsc->role;
+
+ if ((role == RSC_ROLE_STARTED)
+ && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
+ pe_rsc_promotable)) {
+
+ role = RSC_ROLE_UNPROMOTED;
+ }
+ return role;
+}
+
+static const char *
+native_displayable_state(const pe_resource_t *rsc, bool print_pending)
+{
+ const char *rsc_state = NULL;
+
+ if (print_pending) {
+ rsc_state = native_pending_state(rsc);
+ }
+ if (rsc_state == NULL) {
+ rsc_state = role2text(native_displayable_role(rsc));
+ }
+ return rsc_state;
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+static void
+native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
+ const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending));
+ const char *target_role = NULL;
+
+ /* resource information. */
+ status_print("%s<resource ", pre_text);
+ status_print(XML_ATTR_ID "=\"%s\" ", rsc_printable_id(rsc));
+ status_print("resource_agent=\"%s%s%s:%s\" ", class,
+ ((prov == NULL)? "" : PROVIDER_SEP),
+ ((prov == NULL)? "" : prov),
+ crm_element_value(rsc->xml, XML_ATTR_TYPE));
+
+ status_print("role=\"%s\" ", rsc_state);
+ if (rsc->meta) {
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ }
+ if (target_role) {
+ status_print("target_role=\"%s\" ", target_role);
+ }
+ status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
+ status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
+ status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
+ status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("failure_ignored=\"%s\" ",
+ pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
+
+ if (options & pe_print_pending) {
+ const char *pending_task = native_pending_task(rsc);
+
+ if (pending_task) {
+ status_print("pending=\"%s\" ", pending_task);
+ }
+ }
+
+ /* print out the nodes this resource is running on */
+ if (options & pe_print_rsconly) {
+ status_print("/>\n");
+        /* suppress the node list */
+ } else if (rsc->running_on != NULL) {
+ GList *gIter = rsc->running_on;
+
+ status_print(">\n");
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ status_print("%s <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
+ "cached=\"%s\"/>\n",
+ pre_text, pcmk__s(node->details->uname, ""),
+ node->details->id, pcmk__btoa(!node->details->online));
+ }
+ status_print("%s</resource>\n", pre_text);
+ } else {
+ status_print("/>\n");
+ }
+}
+
+// Append a flag to resource description string's flags list
+static bool
+add_output_flag(GString *s, const char *flag_desc, bool have_flags)
+{
+ g_string_append(s, (have_flags? ", " : " ("));
+ g_string_append(s, flag_desc);
+ return true;
+}
+
+// Append a node name to resource description string's node list
+static bool
+add_output_node(GString *s, const char *node, bool have_nodes)
+{
+ g_string_append(s, (have_nodes? " " : " [ "));
+ g_string_append(s, node);
+ return true;
+}
+
+/*!
+ * \internal
+ * \brief Create a string description of a resource
+ *
+ * \param[in] rsc Resource to describe
+ * \param[in] name Desired identifier for the resource
+ * \param[in] node If not NULL, node that resource is "on"
+ * \param[in] show_opts Bitmask of pcmk_show_opt_e.
+ * \param[in] target_role Resource's target role
+ * \param[in] show_nodes Whether to display nodes when multiply active
+ *
+ * \return Newly allocated string description of resource
+ * \note Caller must free the result with g_free().
+ */
+gchar *
+pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
+ const pe_node_t *node, uint32_t show_opts,
+ const char *target_role, bool show_nodes)
+{
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *provider = NULL;
+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
+ GString *outstr = NULL;
+ bool have_flags = false;
+
+ if (rsc->variant != pe_native) {
+ return NULL;
+ }
+
+ CRM_CHECK(name != NULL, name = "unknown");
+ CRM_CHECK(kind != NULL, kind = "unknown");
+ CRM_CHECK(class != NULL, class = "unknown");
+
+ if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
+ provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
+ }
+
+ if ((node == NULL) && (rsc->lock_node != NULL)) {
+ node = rsc->lock_node;
+ }
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only)
+ || pcmk__list_of_multiple(rsc->running_on)) {
+ node = NULL;
+ }
+
+ outstr = g_string_sized_new(128);
+
+ // Resource name and agent
+ pcmk__g_strcat(outstr,
+ name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP),
+ pcmk__s(provider, ""), ":", kind, "):\t", NULL);
+
+ // State on node
+ if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ g_string_append(outstr, " ORPHANED");
+ }
+ if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ enum rsc_role_e role = native_displayable_role(rsc);
+
+ g_string_append(outstr, " FAILED");
+ if (role > RSC_ROLE_UNPROMOTED) {
+ pcmk__add_word(&outstr, 0, role2text(role));
+ }
+ } else {
+ bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending);
+
+ pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending));
+ }
+ if (node) {
+ pcmk__add_word(&outstr, 0, pe__node_name(node));
+ }
+
+ // Failed probe operation
+ if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) {
+ xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
+ if (probe_op != NULL) {
+ int rc;
+
+ pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0);
+ pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ",
+ NULL);
+ }
+ }
+
+ // Flags, as: (<flag> [...])
+ if (node && !(node->details->online) && node->details->unclean) {
+ have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
+ }
+ if (node && (node == rsc->lock_node)) {
+ have_flags = add_output_flag(outstr, "LOCKED", have_flags);
+ }
+ if (pcmk_is_set(show_opts, pcmk_show_pending)) {
+ const char *pending_task = native_pending_task(rsc);
+
+ if (pending_task) {
+ have_flags = add_output_flag(outstr, pending_task, have_flags);
+ }
+ }
+ if (target_role) {
+ enum rsc_role_e target_role_e = text2role(target_role);
+
+        /* Only show target role if it limits what the resource can do (that
+         * is, ignore Started, which is the default anyway and doesn't
+         * prevent the resource from becoming promoted).
+ */
+ if (target_role_e == RSC_ROLE_STOPPED) {
+ have_flags = add_output_flag(outstr, "disabled", have_flags);
+
+ } else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
+ pe_rsc_promotable)
+ && target_role_e == RSC_ROLE_UNPROMOTED) {
+ have_flags = add_output_flag(outstr, "target-role:", have_flags);
+ g_string_append(outstr, target_role);
+ }
+ }
+
+ // Blocked or maintenance implies unmanaged
+ if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ have_flags = add_output_flag(outstr, "blocked", have_flags);
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ have_flags = add_output_flag(outstr, "maintenance", have_flags);
+ }
+ } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ have_flags = add_output_flag(outstr, "unmanaged", have_flags);
+ }
+
+ if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ have_flags = add_output_flag(outstr, "failure ignored", have_flags);
+ }
+
+ if (have_flags) {
+ g_string_append_c(outstr, ')');
+ }
+
+ // User-supplied description
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
+ || pcmk__list_of_multiple(rsc->running_on)) {
+ const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
+
+ if (desc) {
+ g_string_append(outstr, " (");
+ g_string_append(outstr, desc);
+ g_string_append(outstr, ")");
+        }
+ }
+
+ if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
+ && pcmk__list_of_multiple(rsc->running_on)) {
+ bool have_nodes = false;
+
+ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pe_node_t *n = (pe_node_t *) iter->data;
+
+ have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
+ }
+ if (have_nodes) {
+ g_string_append(outstr, " ]");
+ }
+ }
+
+ return g_string_free(outstr, FALSE);
+}
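+
+/* The returned string looks roughly like this (illustrative, not exact
+ * output):
+ *
+ *     "myip\t(ocf:heartbeat:IPaddr2):\t Started node1 (unmanaged)"
+ *
+ * When the resource is active on multiple nodes and show_nodes is true, a
+ * " [ node1 node2 ]" list is appended instead of a single node name.
+ */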
+
+int
+pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
+ const char *name, const pe_node_t *node,
+ uint32_t show_opts)
+{
+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
+ const char *target_role = NULL;
+
+ xmlNodePtr list_node = NULL;
+ const char *cl = NULL;
+
+ CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(kind != NULL);
+
+ if (rsc->meta) {
+ const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
+
+ if (crm_is_true(is_internal)
+ && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
+
+ crm_trace("skipping print of internal resource %s", rsc->id);
+ return pcmk_rc_no_output;
+ }
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ }
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ cl = "rsc-managed";
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ cl = "rsc-failed";
+
+ } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
+ cl = "rsc-failed";
+
+ } else if (pcmk__list_of_multiple(rsc->running_on)) {
+ cl = "rsc-multiple";
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ cl = "rsc-failure-ignored";
+
+ } else {
+ cl = "rsc-ok";
+ }
+
+ {
+ gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
+ target_role, true);
+
+ list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
+ pcmk_create_html_node(list_node, "span", NULL, cl, s);
+ g_free(s);
+ }
+
+ return pcmk_rc_ok;
+}
+
+int
+pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
+ const char *name, const pe_node_t *node,
+ uint32_t show_opts)
+{
+ const char *target_role = NULL;
+
+ CRM_ASSERT(rsc->variant == pe_native);
+
+ if (rsc->meta) {
+ const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
+
+ if (crm_is_true(is_internal)
+ && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
+
+ crm_trace("skipping print of internal resource %s", rsc->id);
+ return pcmk_rc_no_output;
+ }
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ }
+
+ {
+ gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
+ target_role, true);
+
+ out->list_item(out, NULL, "%s", s);
+ g_free(s);
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
+ const pe_node_t *node, long options, void *print_data)
+{
+ const char *target_role = NULL;
+
+ CRM_ASSERT(rsc->variant == pe_native);
+
+ if (rsc->meta) {
+ const char *is_internal = g_hash_table_lookup(rsc->meta,
+ XML_RSC_ATTR_INTERNAL_RSC);
+
+ if (crm_is_true(is_internal)
+ && !pcmk_is_set(options, pe_print_implicit)) {
+
+ crm_trace("skipping print of internal resource %s", rsc->id);
+ return;
+ }
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ }
+
+ if (options & pe_print_xml) {
+ native_print_xml(rsc, pre_text, options, print_data);
+ return;
+ }
+
+ if ((pre_text == NULL) && (options & pe_print_printf)) {
+ pre_text = " ";
+ }
+
+ if (options & pe_print_html) {
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ status_print("<font color=\"yellow\">");
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ status_print("<font color=\"red\">");
+
+ } else if (rsc->running_on == NULL) {
+ status_print("<font color=\"red\">");
+
+ } else if (pcmk__list_of_multiple(rsc->running_on)) {
+ status_print("<font color=\"orange\">");
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ status_print("<font color=\"yellow\">");
+
+ } else {
+ status_print("<font color=\"green\">");
+ }
+ }
+
+ {
+ gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
+ target_role, false);
+ status_print("%s%s", (pre_text? pre_text : ""), resource_s);
+ g_free(resource_s);
+ }
+
+ if (pcmk_is_set(options, pe_print_html)) {
+ status_print(" </font> ");
+ }
+
+ if (!pcmk_is_set(options, pe_print_rsconly)
+ && pcmk__list_of_multiple(rsc->running_on)) {
+
+ GList *gIter = rsc->running_on;
+ int counter = 0;
+
+ if (options & pe_print_html) {
+ status_print("<ul>\n");
+ } else if ((options & pe_print_printf)
+ || (options & pe_print_ncurses)) {
+ status_print("[");
+ }
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *n = (pe_node_t *) gIter->data;
+
+ counter++;
+
+ if (options & pe_print_html) {
+ status_print("<li>\n%s", pe__node_name(n));
+
+ } else if ((options & pe_print_printf)
+ || (options & pe_print_ncurses)) {
+ status_print(" %s", pe__node_name(n));
+
+ } else if ((options & pe_print_log)) {
+ status_print("\t%d : %s", counter, pe__node_name(n));
+
+ } else {
+ status_print("%s", pe__node_name(n));
+ }
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+
+ }
+ }
+
+ if (options & pe_print_html) {
+ status_print("</ul>\n");
+ } else if ((options & pe_print_printf)
+ || (options & pe_print_ncurses)) {
+ status_print(" ]");
+ }
+ }
+
+ if (options & pe_print_html) {
+ status_print("<br/>\n");
+ } else if (options & pe_print_suppres_nl) {
+ /* nothing */
+ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
+ status_print("\n");
+ }
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+native_print(pe_resource_t *rsc, const char *pre_text, long options,
+ void *print_data)
+{
+ const pe_node_t *node = NULL;
+
+ CRM_ASSERT(rsc->variant == pe_native);
+ if (options & pe_print_xml) {
+ native_print_xml(rsc, pre_text, options, print_data);
+ return;
+ }
+
+ node = pe__current_node(rsc);
+
+ if (node == NULL) {
+ // This is set only if a non-probe action is pending on this node
+ node = rsc->pending_node;
+ }
+
+ common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
+}
+
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__resource_xml(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
+ const char *rsc_state = native_displayable_state(rsc, print_pending);
+
+ const char *desc = NULL;
+ char ra_name[LINE_MAX];
+ char *nodes_running_on = NULL;
+ const char *lock_node_name = NULL;
+ int rc = pcmk_rc_no_output;
+ const char *target_role = NULL;
+
+ desc = pe__resource_description(rsc, show_opts);
+
+ if (rsc->meta != NULL) {
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ }
+
+ CRM_ASSERT(rsc->variant == pe_native);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
+ /* resource information. */
+ snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
+ ((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
+ crm_element_value(rsc->xml, XML_ATTR_TYPE));
+
+ nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
+
+ if (rsc->lock_node != NULL) {
+ lock_node_name = rsc->lock_node->details->uname;
+ }
+
+ rc = pe__name_and_nvpairs_xml(out, true, "resource", 15,
+ "id", rsc_printable_id(rsc),
+ "resource_agent", ra_name,
+ "role", rsc_state,
+ "target_role", target_role,
+ "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
+ "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
+ "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
+ "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "nodes_running_on", nodes_running_on,
+ "pending", (print_pending? native_pending_task(rsc) : NULL),
+ "locked_to", lock_node_name,
+ "description", desc);
+ free(nodes_running_on);
+
+ CRM_ASSERT(rc == pcmk_rc_ok);
+
+ if (rsc->running_on != NULL) {
+ GList *gIter = rsc->running_on;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
+ "name", node->details->uname,
+ "id", node->details->id,
+ "cached", pcmk__btoa(node->details->online));
+ CRM_ASSERT(rc == pcmk_rc_ok);
+ }
+ }
+
+ pcmk__output_xml_pop_parent(out);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__resource_html(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const pe_node_t *node = pe__current_node(rsc);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
+ CRM_ASSERT(rsc->variant == pe_native);
+
+ if (node == NULL) {
+ // This is set only if a non-probe action is pending on this node
+ node = rsc->pending_node;
+ }
+ return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
+}
+
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+int
+pe__resource_text(pcmk__output_t *out, va_list args)
+{
+ uint32_t show_opts = va_arg(args, uint32_t);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ const pe_node_t *node = pe__current_node(rsc);
+
+ CRM_ASSERT(rsc->variant == pe_native);
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
+ if (node == NULL) {
+ // This is set only if a non-probe action is pending on this node
+ node = rsc->pending_node;
+ }
+ return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
+}
+
+void
+native_free(pe_resource_t * rsc)
+{
+ pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
+ common_free(rsc);
+}
+
+enum rsc_role_e
+native_resource_state(const pe_resource_t * rsc, gboolean current)
+{
+ enum rsc_role_e role = rsc->next_role;
+
+ if (current) {
+ role = rsc->role;
+ }
+ pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
+ return role;
+}
+
+/*!
+ * \internal
+ * \brief List nodes where a resource (or any of its children) is located
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] list List to add result to
+ * \param[in] current 0 = where allocated, 1 = where running,
+ * 2 = where running or pending
+ *
+ * \return The list's only node if it contains exactly one, otherwise NULL
+ */
+pe_node_t *
+native_location(const pe_resource_t *rsc, GList **list, int current)
+{
+ pe_node_t *one = NULL;
+ GList *result = NULL;
+
+ if (rsc->children) {
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child = (pe_resource_t *) gIter->data;
+
+ child->fns->location(child, &result, current);
+ }
+
+ } else if (current) {
+
+ if (rsc->running_on) {
+ result = g_list_copy(rsc->running_on);
+ }
+ if ((current == 2) && rsc->pending_node
+ && !pe_find_node_id(result, rsc->pending_node->details->id)) {
+ result = g_list_append(result, rsc->pending_node);
+ }
+
+ } else if (current == FALSE && rsc->allocated_to) {
+ result = g_list_append(NULL, rsc->allocated_to);
+ }
+
+ if (result && (result->next == NULL)) {
+ one = result->data;
+ }
+
+ if (list) {
+ GList *gIter = result;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
+ *list = g_list_append(*list, node);
+ }
+ }
+ }
+
+ g_list_free(result);
+ return one;
+}
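+
+/* Example (sketch): collect every node a clone is running on or has a
+ * non-probe action pending on (current == 2):
+ *
+ *     GList *nodes = NULL;
+ *
+ *     clone->fns->location(clone, &nodes, 2);
+ *     // ... use nodes ...
+ *     g_list_free(nodes);
+ */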
+
+static void
+get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
+{
+ GList *gIter = rsc_list;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
+
+ int offset = 0;
+ char buffer[LINE_MAX];
+
+ int *rsc_counter = NULL;
+ int *active_counter = NULL;
+
+ if (rsc->variant != pe_native) {
+ continue;
+ }
+
+ offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
+ if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
+ const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
+
+ if (prov != NULL) {
+ offset += snprintf(buffer + offset, LINE_MAX - offset,
+ PROVIDER_SEP "%s", prov);
+ }
+ }
+ offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
+ CRM_LOG_ASSERT(offset > 0);
+
+ if (rsc_table) {
+ rsc_counter = g_hash_table_lookup(rsc_table, buffer);
+ if (rsc_counter == NULL) {
+ rsc_counter = calloc(1, sizeof(int));
+ *rsc_counter = 0;
+ g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
+ }
+ (*rsc_counter)++;
+ }
+
+ if (active_table) {
+ GList *gIter2 = rsc->running_on;
+
+ for (; gIter2 != NULL; gIter2 = gIter2->next) {
+ pe_node_t *node = (pe_node_t *) gIter2->data;
+ GHashTable *node_table = NULL;
+
+ if (node->details->unclean == FALSE && node->details->online == FALSE &&
+ pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ continue;
+ }
+
+ node_table = g_hash_table_lookup(active_table, node->details->uname);
+ if (node_table == NULL) {
+ node_table = pcmk__strkey_table(free, free);
+ g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
+ }
+
+ active_counter = g_hash_table_lookup(node_table, buffer);
+ if (active_counter == NULL) {
+ active_counter = calloc(1, sizeof(int));
+ *active_counter = 0;
+ g_hash_table_insert(node_table, strdup(buffer), active_counter);
+ }
+ (*active_counter)++;
+ }
+ }
+ }
+}
+
+static void
+destroy_node_table(gpointer data)
+{
+ GHashTable *node_table = data;
+
+ if (node_table) {
+ g_hash_table_destroy(node_table);
+ }
+}
+
+/*!
+ * \internal
+ * \deprecated This function will be removed in a future release
+ */
+void
+print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
+ void *print_data, gboolean print_all)
+{
+ GHashTable *rsc_table = pcmk__strkey_table(free, free);
+ GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
+ GHashTableIter hash_iter;
+ char *type = NULL;
+ int *rsc_counter = NULL;
+
+ get_rscs_brief(rsc_list, rsc_table, active_table);
+
+ g_hash_table_iter_init(&hash_iter, rsc_table);
+ while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
+ GHashTableIter hash_iter2;
+ char *node_name = NULL;
+ GHashTable *node_table = NULL;
+ int active_counter_all = 0;
+
+ g_hash_table_iter_init(&hash_iter2, active_table);
+ while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
+ int *active_counter = g_hash_table_lookup(node_table, type);
+
+ if (active_counter == NULL || *active_counter == 0) {
+ continue;
+
+ } else {
+ active_counter_all += *active_counter;
+ }
+
+ if (options & pe_print_rsconly) {
+ node_name = NULL;
+ }
+
+ if (options & pe_print_html) {
+ status_print("<li>\n");
+ }
+
+ if (print_all) {
+ status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
+ active_counter ? *active_counter : 0,
+ rsc_counter ? *rsc_counter : 0, type,
+ active_counter && (*active_counter > 0) && node_name ? node_name : "");
+ } else {
+ status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
+ active_counter ? *active_counter : 0, type,
+ active_counter && (*active_counter > 0) && node_name ? node_name : "");
+ }
+
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+
+ if (print_all && active_counter_all == 0) {
+ if (options & pe_print_html) {
+ status_print("<li>\n");
+ }
+
+ status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
+ active_counter_all,
+ rsc_counter ? *rsc_counter : 0, type);
+
+ if (options & pe_print_html) {
+ status_print("</li>\n");
+ }
+ }
+ }
+
+ if (rsc_table) {
+ g_hash_table_destroy(rsc_table);
+ rsc_table = NULL;
+ }
+ if (active_table) {
+ g_hash_table_destroy(active_table);
+ active_table = NULL;
+ }
+}
+
+int
+pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
+{
+ GHashTable *rsc_table = pcmk__strkey_table(free, free);
+ GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
+ GList *sorted_rscs;
+ int rc = pcmk_rc_no_output;
+
+ get_rscs_brief(rsc_list, rsc_table, active_table);
+
+    /* Make a list of the rsc_table keys so that they can be sorted, which
+     * keeps output order consistent between systems.
+ */
+ sorted_rscs = g_hash_table_get_keys(rsc_table);
+ sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
+
+ for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
+ char *type = (char *) gIter->data;
+ int *rsc_counter = g_hash_table_lookup(rsc_table, type);
+
+ GList *sorted_nodes = NULL;
+ int active_counter_all = 0;
+
+ /* Also make a list of the active_table keys so it can be sorted. If there's
+ * more than one instance of a type of resource running, we need the nodes to
+ * be sorted to make sure output order stays consistent between systems.
+ */
+ sorted_nodes = g_hash_table_get_keys(active_table);
+ sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);
+
+ for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
+ char *node_name = (char *) gIter2->data;
+ GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
+ int *active_counter = NULL;
+
+ if (node_table == NULL) {
+ continue;
+ }
+
+ active_counter = g_hash_table_lookup(node_table, type);
+
+ if (active_counter == NULL || *active_counter == 0) {
+ continue;
+
+ } else {
+ active_counter_all += *active_counter;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
+ node_name = NULL;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
+ *active_counter,
+ rsc_counter ? *rsc_counter : 0, type,
+ (*active_counter > 0) && node_name ? node_name : "");
+ } else {
+ out->list_item(out, NULL, "%d\t(%s):\tActive %s",
+ *active_counter, type,
+ (*active_counter > 0) && node_name ? node_name : "");
+ }
+
+ rc = pcmk_rc_ok;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
+ out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
+ active_counter_all,
+ rsc_counter ? *rsc_counter : 0, type);
+ rc = pcmk_rc_ok;
+ }
+
+ if (sorted_nodes) {
+ g_list_free(sorted_nodes);
+ }
+ }
+
+ if (rsc_table) {
+ g_hash_table_destroy(rsc_table);
+ rsc_table = NULL;
+ }
+ if (active_table) {
+ g_hash_table_destroy(active_table);
+ active_table = NULL;
+ }
+ if (sorted_rscs) {
+ g_list_free(sorted_rscs);
+ }
+
+ return rc;
+}
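+
+/* Sample brief output (illustrative; agent names and counts are made up):
+ *
+ *     1/1  (ocf:heartbeat:IPaddr2):  Active node1
+ *     2/3  (ocf:pacemaker:ping):     Active node2
+ *
+ * i.e. "active/configured (agent): Active node" per agent type, with both
+ * agent types and node names sorted so output is stable across systems.
+ */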
+
+gboolean
+pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+ gboolean check_parent)
+{
+ if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
+ pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
+ return FALSE;
+ } else if (check_parent && rsc->parent) {
+ const pe_resource_t *up = pe__const_top_resource(rsc, true);
+
+ return up->fns->is_filtered(up, only_rsc, FALSE);
+ }
+
+ return TRUE;
+}
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
new file mode 100644
index 0000000..ed7f0da
--- /dev/null
+++ b/lib/pengine/pe_actions.c
@@ -0,0 +1,1686 @@
+/*
+ * Copyright 2004-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+#include <stdbool.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/pengine/internal.h>
+#include "pe_status_private.h"
+
+static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
+ const pe_resource_t *container,
+ pe_working_set_t *data_set, guint interval_ms);
+
+static void
+add_singleton(pe_working_set_t *data_set, pe_action_t *action)
+{
+ if (data_set->singletons == NULL) {
+ data_set->singletons = pcmk__strkey_table(NULL, NULL);
+ }
+ g_hash_table_insert(data_set->singletons, action->uuid, action);
+}
+
+static pe_action_t *
+lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
+{
+ if (data_set->singletons == NULL) {
+ return NULL;
+ }
+ return g_hash_table_lookup(data_set->singletons, action_uuid);
+}
+
+/*!
+ * \internal
+ * \brief Find an existing action that matches arguments
+ *
+ * \param[in] key Action key to match
+ * \param[in] rsc Resource to match (if any)
+ * \param[in] node Node to match (if any)
+ * \param[in] data_set Cluster working set
+ *
+ * \return Existing action that matches arguments (or NULL if none)
+ */
+static pe_action_t *
+find_existing_action(const char *key, const pe_resource_t *rsc,
+ const pe_node_t *node, const pe_working_set_t *data_set)
+{
+ GList *matches = NULL;
+ pe_action_t *action = NULL;
+
+ /* When rsc is NULL, it would be quicker to check data_set->singletons,
+ * but checking all data_set->actions takes the node into account.
+ */
+ matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
+ key, node);
+ if (matches == NULL) {
+ return NULL;
+ }
+ CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
+
+ action = matches->data;
+ g_list_free(matches);
+ return action;
+}
+
+static xmlNode *
+find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
+ gboolean include_disabled)
+{
+ guint interval_ms = 0;
+ gboolean do_retry = TRUE;
+ char *local_key = NULL;
+ const char *name = NULL;
+ const char *interval_spec = NULL;
+ char *match_key = NULL;
+ xmlNode *op = NULL;
+ xmlNode *operation = NULL;
+
+ retry:
+ for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
+ operation = pcmk__xe_next(operation)) {
+
+ if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ bool enabled = false;
+
+ name = crm_element_value(operation, "name");
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
+ !enabled) {
+ continue;
+ }
+
+ interval_ms = crm_parse_interval_spec(interval_spec);
+ match_key = pcmk__op_key(rsc->id, name, interval_ms);
+ if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
+ op = operation;
+ }
+ free(match_key);
+
+ if (rsc->clone_name) {
+ match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
+ if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
+ op = operation;
+ }
+ free(match_key);
+ }
+
+ if (op != NULL) {
+ free(local_key);
+ return op;
+ }
+ }
+ }
+
+ free(local_key);
+ if (do_retry == FALSE) {
+ return NULL;
+ }
+
+ do_retry = FALSE;
+ if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
+ local_key = pcmk__op_key(rsc->id, "migrate", 0);
+ key = local_key;
+ goto retry;
+
+ } else if (strstr(key, "_notify_")) {
+ local_key = pcmk__op_key(rsc->id, "notify", 0);
+ key = local_key;
+ goto retry;
+ }
+
+ return NULL;
+}
+
+xmlNode *
+find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
+{
+ return find_rsc_op_entry_helper(rsc, key, FALSE);
+}
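+
+/* Usage sketch (illustrative only, not part of the upstream file): look up
+ * the <op> definition backing a 10-second monitor, assuming rsc is a valid
+ * pe_resource_t in scope:
+ *
+ *   char *key = pcmk__op_key(rsc->id, "monitor", 10000);
+ *   xmlNode *op = find_rsc_op_entry(rsc, key);
+ *
+ *   free(key);
+ */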
+
+/*!
+ * \internal
+ * \brief Create a new action object
+ *
+ * \param[in] key Action key
+ * \param[in] task Action name
+ * \param[in,out] rsc Resource that action is for (if any)
+ * \param[in] node Node that action is on (if any)
+ * \param[in] optional Whether action should be considered optional
+ * \param[in] for_graph Whether action should be recorded in transition graph
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Newly allocated action
+ * \note This function takes ownership of \p key. It is the caller's
+ * responsibility to free the return value with pe_free_action().
+ */
+static pe_action_t *
+new_action(char *key, const char *task, pe_resource_t *rsc,
+ const pe_node_t *node, bool optional, bool for_graph,
+ pe_working_set_t *data_set)
+{
+ pe_action_t *action = calloc(1, sizeof(pe_action_t));
+
+ CRM_ASSERT(action != NULL);
+
+ action->rsc = rsc;
+    action->task = strdup(task);
+    CRM_ASSERT(action->task != NULL);
+ action->uuid = key;
+ action->extra = pcmk__strkey_table(free, free);
+ action->meta = pcmk__strkey_table(free, free);
+
+ if (node) {
+ action->node = pe__copy_node(node);
+ }
+
+ if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ // Resource history deletion for a node can be done on the DC
+ pe__set_action_flags(action, pe_action_dc);
+ }
+
+ pe__set_action_flags(action, pe_action_runnable);
+ if (optional) {
+ pe__set_action_flags(action, pe_action_optional);
+ } else {
+ pe__clear_action_flags(action, pe_action_optional);
+ }
+
+ if (rsc != NULL) {
+ guint interval_ms = 0;
+
+ action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
+ parse_op_key(key, NULL, NULL, &interval_ms);
+ unpack_operation(action, action->op_entry, rsc->container, data_set,
+ interval_ms);
+ }
+
+ if (for_graph) {
+ pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
+ (optional? "optional" : "required"),
+ data_set->action_id, key, task,
+ ((rsc == NULL)? "no resource" : rsc->id),
+ pe__node_name(node));
+ action->id = data_set->action_id++;
+
+ data_set->actions = g_list_prepend(data_set->actions, action);
+ if (rsc == NULL) {
+ add_singleton(data_set, action);
+ } else {
+ rsc->actions = g_list_prepend(rsc->actions, action);
+ }
+ }
+ return action;
+}
+
+/*!
+ * \internal
+ * \brief Evaluate node attribute values for an action
+ *
+ * \param[in,out] action Action to unpack attributes for
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
+{
+ if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
+ && (action->op_entry != NULL)) {
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = action->node->details->attrs,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe__set_action_flags(action, pe_action_have_node_attrs);
+ pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
+ &rule_data, action->extra, NULL,
+ FALSE, data_set);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Update an action's optional flag
+ *
+ * \param[in,out] action Action to update
+ * \param[in] optional Requested optional status
+ */
+static void
+update_action_optional(pe_action_t *action, gboolean optional)
+{
+ // Force a non-recurring action to be optional if its resource is unmanaged
+ if ((action->rsc != NULL) && (action->node != NULL)
+ && !pcmk_is_set(action->flags, pe_action_pseudo)
+ && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && (g_hash_table_lookup(action->meta,
+ XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
+ pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
+ action->uuid, pe__node_name(action->node),
+ action->rsc->id);
+ pe__set_action_flags(action, pe_action_optional);
+        // We should not clear the runnable flag here, though the reason is not documented
+
+ // Otherwise require the action if requested
+ } else if (!optional) {
+ pe__clear_action_flags(action, pe_action_optional);
+ }
+}
+
+static enum pe_quorum_policy
+effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
+{
+ enum pe_quorum_policy policy = data_set->no_quorum_policy;
+
+ if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ policy = no_quorum_ignore;
+
+ } else if (data_set->no_quorum_policy == no_quorum_demote) {
+ switch (rsc->role) {
+ case RSC_ROLE_PROMOTED:
+ case RSC_ROLE_UNPROMOTED:
+ if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
+ pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
+ "no-quorum-policy=demote");
+ }
+ policy = no_quorum_ignore;
+ break;
+ default:
+ policy = no_quorum_stop;
+ break;
+ }
+ }
+ return policy;
+}
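+
+/* Illustration (editorial note, not upstream documentation): with
+ * no-quorum-policy=demote, a partition that loses quorum keeps promoted and
+ * unpromoted instances running but schedules a demotion to the unpromoted
+ * role; every other resource is treated as if the policy were "stop".
+ */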
+
+/*!
+ * \internal
+ * \brief Update a resource action's runnable flag
+ *
+ * \param[in,out] action Action to update
+ * \param[in] for_graph Whether action should be recorded in transition graph
+ * \param[in,out] data_set Cluster working set
+ *
+ * \note This may also schedule fencing if a stop is unrunnable.
+ */
+static void
+update_resource_action_runnable(pe_action_t *action, bool for_graph,
+ pe_working_set_t *data_set)
+{
+ if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ return;
+ }
+
+ if (action->node == NULL) {
+ pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
+ action->uuid);
+ pe__clear_action_flags(action, pe_action_runnable);
+
+ } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ && !(action->node->details->online)
+ && (!pe__is_guest_node(action->node)
+ || action->node->details->remote_requires_reset)) {
+ pe__clear_action_flags(action, pe_action_runnable);
+ do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ "%s on %s is unrunnable (node is offline)",
+ action->uuid, pe__node_name(action->node));
+ if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && for_graph
+ && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ && !(action->node->details->unclean)) {
+ pe_fence_node(data_set, action->node, "stop is unrunnable", false);
+ }
+
+ } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ && action->node->details->pending) {
+ pe__clear_action_flags(action, pe_action_runnable);
+ do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ "Action %s on %s is unrunnable (node is pending)",
+ action->uuid, pe__node_name(action->node));
+
+ } else if (action->needs == rsc_req_nothing) {
+ pe_action_set_reason(action, NULL, TRUE);
+ if (pe__is_guest_node(action->node)
+ && !pe_can_fence(data_set, action->node)) {
+ /* An action that requires nothing usually does not require any
+ * fencing in order to be runnable. However, there is an exception:
+ * such an action cannot be completed if it is on a guest node whose
+ * host is unclean and cannot be fenced.
+ */
+ pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
+ "(node's host cannot be fenced)",
+ action->uuid, pe__node_name(action->node));
+ pe__clear_action_flags(action, pe_action_runnable);
+ } else {
+ pe_rsc_trace(action->rsc,
+ "%s on %s does not require fencing or quorum",
+ action->uuid, pe__node_name(action->node));
+ pe__set_action_flags(action, pe_action_runnable);
+ }
+
+ } else {
+ switch (effective_quorum_policy(action->rsc, data_set)) {
+ case no_quorum_stop:
+ pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
+ action->uuid, pe__node_name(action->node));
+ pe__clear_action_flags(action, pe_action_runnable);
+ pe_action_set_reason(action, "no quorum", true);
+ break;
+
+ case no_quorum_freeze:
+ if (!action->rsc->fns->active(action->rsc, TRUE)
+ || (action->rsc->next_role > action->rsc->role)) {
+ pe_rsc_debug(action->rsc,
+ "%s on %s is unrunnable (no quorum)",
+ action->uuid, pe__node_name(action->node));
+ pe__clear_action_flags(action, pe_action_runnable);
+ pe_action_set_reason(action, "quorum freeze", true);
+ }
+ break;
+
+ default:
+ //pe_action_set_reason(action, NULL, TRUE);
+ pe__set_action_flags(action, pe_action_runnable);
+ break;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Update a resource object's flags for a new action on it
+ *
+ * \param[in,out] rsc Resource that action is for (if any)
+ * \param[in] action New action
+ */
+static void
+update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
+{
+ /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
+ * within Pacemaker, and should be deprecated and eventually removed
+ */
+ if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pe_rsc_stopping);
+
+ } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pe_action_runnable)) {
+ pe__set_resource_flags(rsc, pe_rsc_starting);
+ } else {
+ pe__clear_resource_flags(rsc, pe_rsc_starting);
+ }
+ }
+}
+
+static bool
+valid_stop_on_fail(const char *value)
+{
+ return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
+}
+
+static const char *
+unpack_operation_on_fail(pe_action_t * action)
+{
+ const char *name = NULL;
+ const char *role = NULL;
+ const char *on_fail = NULL;
+ const char *interval_spec = NULL;
+ const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+
+ if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ && !valid_stop_on_fail(value)) {
+
+ pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
+ "action to default value because '%s' is not "
+ "allowed for stop", action->rsc->id, value);
+ return NULL;
+
+ } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
+        // For demote, on-fail defaults to the promoted-role monitor's value (if any)
+ xmlNode *operation = NULL;
+
+ CRM_CHECK(action->rsc != NULL, return NULL);
+
+ for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
+ (operation != NULL) && (value == NULL);
+ operation = pcmk__xe_next(operation)) {
+ bool enabled = false;
+
+ if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ continue;
+ }
+ name = crm_element_value(operation, "name");
+ role = crm_element_value(operation, "role");
+ on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (!on_fail) {
+ continue;
+ } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ continue;
+ } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
+ || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
+ RSC_ROLE_PROMOTED_LEGACY_S,
+ NULL)) {
+ continue;
+ } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ continue;
+ } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ continue;
+ }
+
+ value = on_fail;
+ }
+ } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ value = "ignore";
+
+ } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action->op_entry, "name");
+ role = crm_element_value(action->op_entry, "role");
+ interval_spec = crm_element_value(action->op_entry,
+ XML_LRM_ATTR_INTERVAL);
+
+ if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
+ && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
+ || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
+ RSC_ROLE_PROMOTED_LEGACY_S, NULL)
+ || (crm_parse_interval_spec(interval_spec) == 0))) {
+ pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
+ "action to default value because 'demote' is not "
+ "allowed for it", action->rsc->id, name);
+ return NULL;
+ }
+ }
+
+ return value;
+}
+
+static int
+unpack_timeout(const char *value)
+{
+ int timeout_ms = crm_get_msec(value);
+
+ if (timeout_ms < 0) {
+ timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ }
+ return timeout_ms;
+}
+
+// true if value contains valid, non-NULL interval origin for recurring op
+static bool
+unpack_interval_origin(const char *value, const xmlNode *xml_obj,
+ guint interval_ms, const crm_time_t *now,
+ long long *start_delay)
+{
+ long long result = 0;
+ guint interval_sec = interval_ms / 1000;
+ crm_time_t *origin = NULL;
+
+ // Ignore unspecified values and non-recurring operations
+ if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
+ return false;
+ }
+
+ // Parse interval origin from text
+ origin = crm_time_new(value);
+ if (origin == NULL) {
+ pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
+ "'%s' because '%s' is not valid",
+ (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
+ return false;
+ }
+
+ // Get seconds since origin (negative if origin is in the future)
+ result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
+ crm_time_free(origin);
+
+ // Calculate seconds from closest interval to now
+ result = result % interval_sec;
+
+ // Calculate seconds remaining until next interval
+ result = ((result <= 0)? 0 : interval_sec) - result;
+ crm_info("Calculated a start delay of %llds for operation '%s'",
+ result,
+ (ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
+
+ if (start_delay != NULL) {
+ *start_delay = result * 1000; // milliseconds
+ }
+ return true;
+}
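+
+/* Worked example (illustrative): with a 24-hour interval and
+ * interval-origin 02:00, a check at 14:32 gives 45120 seconds since the
+ * origin, so the delay is 86400 - (45120 % 86400) = 41280 seconds and the
+ * operation first runs at the next 02:00.
+ */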
+
+static int
+unpack_start_delay(const char *value, GHashTable *meta)
+{
+ int start_delay = 0;
+
+ if (value != NULL) {
+ start_delay = crm_get_msec(value);
+
+ if (start_delay < 0) {
+ start_delay = 0;
+ }
+
+ if (meta) {
+ g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
+ pcmk__itoa(start_delay));
+ }
+ }
+
+ return start_delay;
+}
+
+static xmlNode *
+find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
+{
+ guint interval_ms = 0;
+ guint min_interval_ms = G_MAXUINT;
+ const char *name = NULL;
+ const char *interval_spec = NULL;
+ xmlNode *op = NULL;
+ xmlNode *operation = NULL;
+
+ for (operation = pcmk__xe_first_child(rsc->ops_xml);
+ operation != NULL;
+ operation = pcmk__xe_next(operation)) {
+
+ if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ bool enabled = false;
+
+ name = crm_element_value(operation, "name");
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
+ !enabled) {
+ continue;
+ }
+
+ if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
+ continue;
+ }
+
+ interval_ms = crm_parse_interval_spec(interval_spec);
+
+ if (interval_ms && (interval_ms < min_interval_ms)) {
+ min_interval_ms = interval_ms;
+ op = operation;
+ }
+ }
+ }
+
+ return op;
+}
+
+/*!
+ * \brief Unpack operation XML into an action structure
+ *
+ * Unpack an operation's meta-attributes (normalizing the interval, timeout,
+ * and start delay values as integer milliseconds), requirements, and
+ * failure policy.
+ *
+ * \param[in,out] action Action to unpack into
+ * \param[in] xml_obj Operation XML (or NULL if all defaults)
+ * \param[in] container Resource that contains affected resource, if any
+ * \param[in,out] data_set Cluster state
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
+ const pe_resource_t *container,
+ pe_working_set_t *data_set, guint interval_ms)
+{
+ int timeout_ms = 0;
+ const char *value = NULL;
+ bool is_probe = false;
+
+ pe_rsc_eval_data_t rsc_rule_data = {
+ .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ };
+
+ pe_op_eval_data_t op_rule_data = {
+ .op_name = action->task,
+ .interval = interval_ms
+ };
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = &rsc_rule_data,
+ .op_data = &op_rule_data
+ };
+
+ CRM_CHECK(action && action->rsc, return);
+
+ is_probe = pcmk_is_probe(action->task, interval_ms);
+
+ // Cluster-wide <op_defaults> <meta_attributes>
+ pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
+ action->meta, NULL, FALSE, data_set);
+
+    // Probes default to the timeout of the resource's minimum-interval monitor
+ if (is_probe) {
+ xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
+
+ if (min_interval_mon) {
+ value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
+ if (value) {
+ crm_trace("\t%s: Setting default timeout to minimum-interval "
+ "monitor's timeout '%s'", action->uuid, value);
+ g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
+ strdup(value));
+ }
+ }
+ }
+
+ if (xml_obj) {
+ xmlAttrPtr xIter = NULL;
+
+ // <op> <meta_attributes> take precedence over defaults
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
+ action->meta, NULL, TRUE, data_set);
+
+ /* Anything set as an <op> XML property has highest precedence.
+ * This ensures we use the name and interval from the <op> tag.
+ */
+ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
+ const char *prop_name = (const char *)xIter->name;
+ const char *prop_value = crm_element_value(xml_obj, prop_name);
+
+ g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ }
+ }
+
+ g_hash_table_remove(action->meta, "id");
+
+ // Normalize interval to milliseconds
+ if (interval_ms > 0) {
+ g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
+ crm_strdup_printf("%u", interval_ms));
+ } else {
+ g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /*
+ * Timeout order of precedence:
+ * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
+ * and task is start or a probe; pcmk_monitor_timeout works
+ * by default for a recurring monitor)
+ * 2. explicit op timeout on the primitive
+ * 3. default op timeout
+ * a. if probe, then min-interval monitor's timeout
+ * b. else, in XML_CIB_TAG_OPCONFIG
+ * 4. CRM_DEFAULT_OP_TIMEOUT_S
+ *
+     * #1 overrides the general rule that <op> XML properties have the
+     * highest precedence.
+ */
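+
+    /* Example (illustrative, editorial): a fence device with
+     * pcmk_monitor_timeout=60s and an explicit 20s timeout on its probe
+     * gets a 60s timeout here, because rule #1 takes precedence.
+     */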
+ if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
+ pcmk_ra_cap_fence_params)
+ && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
+ || is_probe)) {
+
+ GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+
+ value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+
+ if (value) {
+ crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
+ "overriding default", action->uuid, value);
+ g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
+ strdup(value));
+ }
+ }
+
+ // Normalize timeout to positive milliseconds
+ value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
+ timeout_ms = unpack_timeout(value);
+ g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
+ pcmk__itoa(timeout_ms));
+
+ if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
+ action->needs = rsc_req_nothing;
+ value = "nothing (not start or promote)";
+
+ } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
+ action->needs = rsc_req_stonith;
+ value = "fencing";
+
+ } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
+ action->needs = rsc_req_quorum;
+ value = "quorum";
+
+ } else {
+ action->needs = rsc_req_nothing;
+ value = "nothing";
+ }
+ pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
+
+ value = unpack_operation_on_fail(action);
+
+ if (value == NULL) {
+
+ } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
+ action->on_fail = action_fail_block;
+ g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
+ value = "block"; // The above could destroy the original string
+
+ } else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
+ action->on_fail = action_fail_fence;
+ value = "node fencing";
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
+ "operation '%s' to 'stop' because 'fence' is not "
+ "valid when fencing is disabled", action->uuid);
+ action->on_fail = action_fail_stop;
+ action->fail_role = RSC_ROLE_STOPPED;
+ value = "stop resource";
+ }
+
+ } else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
+ action->on_fail = action_fail_standby;
+ value = "node standby";
+
+ } else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
+ NULL)) {
+ action->on_fail = action_fail_ignore;
+ value = "ignore";
+
+ } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
+ action->on_fail = action_fail_migrate;
+ value = "force migration";
+
+ } else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
+ action->on_fail = action_fail_stop;
+ action->fail_role = RSC_ROLE_STOPPED;
+ value = "stop resource";
+
+ } else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
+ action->on_fail = action_fail_recover;
+ value = "restart (and possibly migrate)";
+
+ } else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
+ if (container) {
+ action->on_fail = action_fail_restart_container;
+ value = "restart container (and possibly migrate)";
+
+ } else {
+ value = NULL;
+ }
+
+ } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ action->on_fail = action_fail_demote;
+ value = "demote instance";
+
+ } else {
+ pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
+ value = NULL;
+ }
+
+ /* defaults */
+ if (value == NULL && container) {
+ action->on_fail = action_fail_restart_container;
+ value = "restart container (and possibly migrate) (default)";
+
+    /* For remote nodes, ensure that any failure that results in dropping an
+     * active connection to the node results in fencing of the node.
+     *
+     * There are only two action failures that don't result in fencing:
+     * 1. probes - probe failures are expected
+     * 2. start - a start failure indicates that an active connection does
+     *    not already exist; the user can set on-fail=fence if they really
+     *    want to fence start failures
+     */
+ } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
+ && pe__resource_is_remote_conn(action->rsc, data_set)
+ && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ && (interval_ms == 0))
+ && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
+
+ if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
+ action->on_fail = action_fail_stop;
+ action->fail_role = RSC_ROLE_STOPPED;
+ value = "stop unmanaged remote node (enforcing default)";
+
+ } else {
+ if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ value = "fence remote node (default)";
+ } else {
+ value = "recover remote node connection (default)";
+ }
+
+ if (action->rsc->remote_reconnect_ms) {
+ action->fail_role = RSC_ROLE_STOPPED;
+ }
+ action->on_fail = action_fail_reset_remote;
+ }
+
+ } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ action->on_fail = action_fail_fence;
+ value = "resource fence (default)";
+
+ } else {
+ action->on_fail = action_fail_block;
+ value = "resource block (default)";
+ }
+
+ } else if (value == NULL) {
+ action->on_fail = action_fail_recover;
+ value = "restart (and possibly migrate) (default)";
+ }
+
+ pe_rsc_trace(action->rsc, "%s failure handling: %s",
+ action->uuid, value);
+
+ value = NULL;
+ if (xml_obj != NULL) {
+ value = g_hash_table_lookup(action->meta, "role_after_failure");
+ if (value) {
+ pe_warn_once(pe_wo_role_after,
+ "Support for role_after_failure is deprecated and will be removed in a future release");
+ }
+ }
+ if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
+ action->fail_role = text2role(value);
+ }
+ /* defaults */
+ if (action->fail_role == RSC_ROLE_UNKNOWN) {
+ if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ action->fail_role = RSC_ROLE_UNPROMOTED;
+ } else {
+ action->fail_role = RSC_ROLE_STARTED;
+ }
+ }
+ pe_rsc_trace(action->rsc, "%s failure results in: %s",
+ action->uuid, role2text(action->fail_role));
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
+ if (value) {
+ unpack_start_delay(value, action->meta);
+ } else {
+ long long start_delay = 0;
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
+ &start_delay)) {
+ g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
+ crm_strdup_printf("%lld", start_delay));
+ }
+ }
+}
+
+/*!
+ * \brief Create or update an action object
+ *
+ * \param[in,out] rsc Resource that action is for (if any)
+ * \param[in,out] key Action key (must be non-NULL)
+ * \param[in] task Action name (must be non-NULL)
+ * \param[in] on_node Node that action is on (if any)
+ * \param[in] optional Whether action should be considered optional
+ * \param[in] save_action Whether action should be recorded in transition graph
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Action object corresponding to arguments
+ * \note This function takes ownership of (and might free) \p key. If
+ * \p save_action is true, \p data_set will own the returned action,
+ * otherwise it is the caller's responsibility to free the return value
+ * with pe_free_action().
+ */
+pe_action_t *
+custom_action(pe_resource_t *rsc, char *key, const char *task,
+ const pe_node_t *on_node, gboolean optional, gboolean save_action,
+ pe_working_set_t *data_set)
+{
+ pe_action_t *action = NULL;
+
+ CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
+
+ if (save_action) {
+ action = find_existing_action(key, rsc, on_node, data_set);
+ }
+
+ if (action == NULL) {
+ action = new_action(key, task, rsc, on_node, optional, save_action,
+ data_set);
+ } else {
+ free(key);
+ }
+
+ update_action_optional(action, optional);
+
+ if (rsc != NULL) {
+ if (action->node != NULL) {
+ unpack_action_node_attributes(action, data_set);
+ }
+
+ update_resource_action_runnable(action, save_action, data_set);
+
+ if (save_action) {
+ update_resource_flags_for_action(rsc, action);
+ }
+ }
+
+ return action;
+}
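+
+/* Usage sketch (illustrative only): schedule a required start for a
+ * resource on a node and record it in the transition graph, assuming rsc,
+ * node, and data_set are valid and in scope:
+ *
+ *   pe_action_t *start = custom_action(rsc,
+ *                                      pcmk__op_key(rsc->id, RSC_START, 0),
+ *                                      RSC_START, node, FALSE, TRUE,
+ *                                      data_set);
+ */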
+
+pe_action_t *
+get_pseudo_op(const char *name, pe_working_set_t * data_set)
+{
+ pe_action_t *op = lookup_singleton(data_set, name);
+
+ if (op == NULL) {
+ op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
+ pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ }
+ return op;
+}
+
+static GList *
+find_unfencing_devices(GList *candidates, GList *matches)
+{
+ for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *candidate = gIter->data;
+
+ if (candidate->children != NULL) {
+ matches = find_unfencing_devices(candidate->children, matches);
+
+ } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
+ continue;
+
+ } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
+ matches = g_list_prepend(matches, candidate);
+
+ } else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
+ PCMK_STONITH_PROVIDES),
+ PCMK__VALUE_UNFENCING,
+ pcmk__str_casei)) {
+ matches = g_list_prepend(matches, candidate);
+ }
+ }
+ return matches;
+}
+
+static int
+node_priority_fencing_delay(const pe_node_t *node,
+ const pe_working_set_t *data_set)
+{
+ int member_count = 0;
+ int online_count = 0;
+ int top_priority = 0;
+ int lowest_priority = 0;
+ GList *gIter = NULL;
+
+ // `priority-fencing-delay` is disabled
+ if (data_set->priority_fencing_delay <= 0) {
+ return 0;
+ }
+
+ /* No need to request a delay if the fencing target is not a normal cluster
+ * member, for example if it's a remote node or a guest node. */
+ if (node->details->type != node_member) {
+ return 0;
+ }
+
+ // No need to request a delay if the fencing target is in our partition
+ if (node->details->online) {
+ return 0;
+ }
+
+ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *n = gIter->data;
+
+ if (n->details->type != node_member) {
+ continue;
+ }
+
+        member_count++;
+
+ if (n->details->online) {
+ online_count++;
+ }
+
+ if (member_count == 1
+ || n->details->priority > top_priority) {
+ top_priority = n->details->priority;
+ }
+
+ if (member_count == 1
+ || n->details->priority < lowest_priority) {
+ lowest_priority = n->details->priority;
+ }
+ }
+
+ // No need to delay if we have more than half of the cluster members
+ if (online_count > member_count / 2) {
+ return 0;
+ }
+
+    /* All nodes have equal priority, so any configured `pcmk_delay_base/max`
+     * will be applied as usual. */
+ if (lowest_priority == top_priority) {
+ return 0;
+ }
+
+ if (node->details->priority < top_priority) {
+ return 0;
+ }
+
+ return data_set->priority_fencing_delay;
+}
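+
+/* Editorial note: the delay above is requested only when the target is an
+ * offline full cluster member holding the highest node priority (and node
+ * priorities actually differ), with no more than half of the members
+ * online; in all other cases any configured pcmk_delay_base/max applies
+ * unmodified.
+ */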
+
+pe_action_t *
+pe_fence_op(pe_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay, pe_working_set_t *data_set)
+{
+ char *op_key = NULL;
+ pe_action_t *stonith_op = NULL;
+
+ if(op == NULL) {
+ op = data_set->stonith_action;
+ }
+
+ op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
+
+ stonith_op = lookup_singleton(data_set, op_key);
+ if(stonith_op == NULL) {
+ stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
+
+ add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
+ add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
+ add_hash_param(stonith_op->meta, "stonith_action", op);
+
+ if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+            /* Extra work to detect device changes */
+ GString *digests_all = g_string_sized_new(1024);
+ GString *digests_secure = g_string_sized_new(1024);
+
+ GList *matches = find_unfencing_devices(data_set->resources, NULL);
+
+ char *key = NULL;
+ char *value = NULL;
+
+ for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *match = gIter->data;
+ const char *agent = g_hash_table_lookup(match->meta,
+ XML_ATTR_TYPE);
+ op_digest_cache_t *data = NULL;
+
+ data = pe__compare_fencing_digest(match, agent, node, data_set);
+ if(data->rc == RSC_DIGEST_ALL) {
+ optional = FALSE;
+ crm_notice("Unfencing node %s because the definition of "
+ "%s changed", pe__node_name(node), match->id);
+ if (!pcmk__is_daemon && data_set->priv != NULL) {
+ pcmk__output_t *out = data_set->priv;
+
+ out->info(out,
+ "notice: Unfencing node %s because the "
+ "definition of %s changed",
+ pe__node_name(node), match->id);
+ }
+ }
+
+ pcmk__g_strcat(digests_all,
+ match->id, ":", agent, ":",
+ data->digest_all_calc, ",", NULL);
+ pcmk__g_strcat(digests_secure,
+ match->id, ":", agent, ":",
+ data->digest_secure_calc, ",", NULL);
+ }
+ key = strdup(XML_OP_ATTR_DIGESTS_ALL);
+ value = strdup((const char *) digests_all->str);
+ CRM_ASSERT((key != NULL) && (value != NULL));
+ g_hash_table_insert(stonith_op->meta, key, value);
+ g_string_free(digests_all, TRUE);
+
+ key = strdup(XML_OP_ATTR_DIGESTS_SECURE);
+ value = strdup((const char *) digests_secure->str);
+ CRM_ASSERT((key != NULL) && (value != NULL));
+ g_hash_table_insert(stonith_op->meta, key, value);
+ g_string_free(digests_secure, TRUE);
+ }
+
+ } else {
+ free(op_key);
+ }
+
+ if (data_set->priority_fencing_delay > 0
+
+        /* This is a case where `priority-fencing-delay` applies; at a
+         * minimum, add the `priority-fencing-delay` field as an indicator. */
+ && (priority_delay
+
+ /* The priority delay needs to be recalculated if this function has
+ * been called by schedule_fencing_and_shutdowns() after node
+ * priority has already been calculated by native_add_running().
+ */
+ || g_hash_table_lookup(stonith_op->meta,
+ XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
+
+ /* Add `priority-fencing-delay` to the fencing op even if it's 0 for
+ * the targeting node. So that it takes precedence over any possible
+ * `pcmk_delay_base/max`.
+ */
+ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
+
+ g_hash_table_insert(stonith_op->meta,
+ strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
+ delay_s);
+ }
+
+ if(optional == FALSE && pe_can_fence(data_set, node)) {
+ pe__clear_action_flags(stonith_op, pe_action_optional);
+ pe_action_set_reason(stonith_op, reason, false);
+
+ } else if(reason && stonith_op->reason == NULL) {
+ stonith_op->reason = strdup(reason);
+ }
+
+ return stonith_op;
+}
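+
+/* Usage sketch (illustrative only): request a required reboot of an
+ * unclean node, assuming node and data_set are valid and in scope:
+ *
+ *   pe_action_t *fencing = pe_fence_op(node, "reboot", FALSE,
+ *                                      "node is unclean", FALSE, data_set);
+ */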
+
+void
+pe_free_action(pe_action_t * action)
+{
+ if (action == NULL) {
+ return;
+ }
+ g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
+ g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
+ if (action->extra) {
+ g_hash_table_destroy(action->extra);
+ }
+ if (action->meta) {
+ g_hash_table_destroy(action->meta);
+ }
+ free(action->cancel_task);
+ free(action->reason);
+ free(action->task);
+ free(action->uuid);
+ free(action->node);
+ free(action);
+}
+
+int
+pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
+{
+ xmlNode *child = NULL;
+ GHashTable *action_meta = NULL;
+ const char *timeout_spec = NULL;
+ int timeout_ms = 0;
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ child != NULL; child = crm_next_same_xml(child)) {
+ if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
+ pcmk__str_casei)) {
+ timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
+ break;
+ }
+ }
+
+ if (timeout_spec == NULL && data_set->op_defaults) {
+ action_meta = pcmk__strkey_table(free, free);
+ pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
+ &rule_data, action_meta, NULL, FALSE, data_set);
+ timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
+ }
+
+ // @TODO check meta-attributes
+ // @TODO maybe use min-interval monitor timeout as default for monitors
+
+ timeout_ms = crm_get_msec(timeout_spec);
+ if (timeout_ms < 0) {
+ timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ }
+
+ if (action_meta != NULL) {
+ g_hash_table_destroy(action_meta);
+ }
+ return timeout_ms;
+}
+
+enum action_tasks
+get_complex_task(const pe_resource_t *rsc, const char *name)
+{
+ enum action_tasks task = text2task(name);
+
+ if ((rsc != NULL) && (rsc->variant == pe_native)) {
+ switch (task) {
+ case stopped_rsc:
+ case started_rsc:
+ case action_demoted:
+ case action_promoted:
+ crm_trace("Folding %s back into its atomic counterpart for %s",
+ name, rsc->id);
+ --task;
+ break;
+ default:
+ break;
+ }
+ }
+ return task;
+}
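+
+/* Editorial note: the decrement above relies on enum action_tasks placing
+ * each collective result (e.g. stopped_rsc) immediately after its atomic
+ * counterpart (e.g. stop_rsc).
+ */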
+
+/*!
+ * \internal
+ * \brief Find first matching action in a list
+ *
+ * \param[in] input List of actions to search
+ * \param[in] uuid If not NULL, action must have this UUID
+ * \param[in] task If not NULL, action must have this action name
+ * \param[in] on_node If not NULL, action must be on this node
+ *
+ * \return First action in list that matches criteria, or NULL if none
+ */
+pe_action_t *
+find_first_action(const GList *input, const char *uuid, const char *task,
+ const pe_node_t *on_node)
+{
+ CRM_CHECK(uuid || task, return NULL);
+
+ for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
+ pe_action_t *action = (pe_action_t *) gIter->data;
+
+ if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
+ continue;
+
+ } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
+ continue;
+
+ } else if (on_node == NULL) {
+ return action;
+
+ } else if (action->node == NULL) {
+ continue;
+
+ } else if (on_node->details == action->node->details) {
+ return action;
+ }
+ }
+
+ return NULL;
+}
+
+GList *
+find_actions(GList *input, const char *key, const pe_node_t *on_node)
+{
+ GList *gIter = input;
+ GList *result = NULL;
+
+ CRM_CHECK(key != NULL, return NULL);
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_action_t *action = (pe_action_t *) gIter->data;
+
+ if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
+ continue;
+
+ } else if (on_node == NULL) {
+ crm_trace("Action %s matches (ignoring node)", key);
+ result = g_list_prepend(result, action);
+
+ } else if (action->node == NULL) {
+ crm_trace("Action %s matches (unallocated, assigning to %s)",
+ key, pe__node_name(on_node));
+
+ action->node = pe__copy_node(on_node);
+ result = g_list_prepend(result, action);
+
+ } else if (on_node->details == action->node->details) {
+ crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
+ result = g_list_prepend(result, action);
+ }
+ }
+
+ return result;
+}
+
+GList *
+find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
+{
+ GList *result = NULL;
+
+ CRM_CHECK(key != NULL, return NULL);
+
+ if (on_node == NULL) {
+ return NULL;
+ }
+
+ for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
+ pe_action_t *action = (pe_action_t *) gIter->data;
+
+ if ((action->node != NULL)
+ && pcmk__str_eq(key, action->uuid, pcmk__str_casei)
+ && pcmk__str_eq(on_node->details->id, action->node->details->id,
+ pcmk__str_casei)) {
+
+ crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
+ result = g_list_prepend(result, action);
+ }
+ }
+
+ return result;
+}
+
+/*!
+ * \brief Find all actions of given type for a resource
+ *
+ * \param[in] rsc Resource to search
+ * \param[in] node Find only actions scheduled on this node
+ * \param[in] task Action name to search for
+ * \param[in] require_node If TRUE, NULL node or action node will not match
+ *
+ * \return List of actions found (or NULL if none)
+ * \note If node is not NULL and require_node is FALSE, matching actions
+ * without a node will be assigned to node.
+ */
+GList *
+pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+ const char *task, bool require_node)
+{
+ GList *result = NULL;
+ char *key = pcmk__op_key(rsc->id, task, 0);
+
+ if (require_node) {
+ result = find_actions_exact(rsc->actions, key, node);
+ } else {
+ result = find_actions(rsc->actions, key, node);
+ }
+ free(key);
+ return result;
+}
+
+/*!
+ * \internal
+ * \brief Create an action reason string based on the action itself
+ *
+ * \param[in] action Action to create reason string for
+ * \param[in] flag Action flag that was cleared
+ *
+ * \return Newly allocated string suitable for use as action reason
+ * \note It is the caller's responsibility to free() the result.
+ */
+char *
+pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
+{
+ const char *change = NULL;
+
+ switch (flag) {
+ case pe_action_runnable:
+ case pe_action_migrate_runnable:
+ change = "unrunnable";
+ break;
+ case pe_action_optional:
+ change = "required";
+ break;
+ default:
+ // Bug: caller passed unsupported flag
+ CRM_CHECK(change != NULL, change = "");
+ break;
+ }
+ return crm_strdup_printf("%s%s%s %s", change,
+ (action->rsc == NULL)? "" : " ",
+ (action->rsc == NULL)? "" : action->rsc->id,
+ action->task);
+}
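+
+/* Example (illustrative): clearing pe_action_runnable on a start action
+ * for a resource named "rsc1" yields the reason string
+ * "unrunnable rsc1 start".
+ */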
+
+void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
+{
+ if (action->reason != NULL && overwrite) {
+ pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
+ action->uuid, action->reason, pcmk__s(reason, "(none)"));
+ } else if (action->reason == NULL) {
+ pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
+ action->uuid, pcmk__s(reason, "(none)"));
+ } else {
+ // crm_assert(action->reason != NULL && !overwrite);
+ return;
+ }
+
+ pcmk__str_update(&action->reason, reason);
+}
+
+/*!
+ * \internal
+ * \brief Create an action to clear a resource's history from CIB
+ *
+ * \param[in,out] rsc Resource to clear
+ * \param[in] node Node to clear history on
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return New action to clear resource history
+ */
+pe_action_t *
+pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
+ pe_working_set_t *data_set)
+{
+ char *key = NULL;
+
+ CRM_ASSERT(rsc && node);
+ key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
+ return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
+ data_set);
+}
+
+#define sort_return(an_int, why) do { \
+ free(a_uuid); \
+ free(b_uuid); \
+ crm_trace("%s (%d) %c %s (%d) : %s", \
+ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
+ b_xml_id, b_call_id, why); \
+ return an_int; \
+ } while(0)
+
+int
+pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
+ bool same_node_default)
+{
+ int a_call_id = -1;
+ int b_call_id = -1;
+
+ char *a_uuid = NULL;
+ char *b_uuid = NULL;
+
+ const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
+ const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
+
+ const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET);
+ const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET);
+ bool same_node = true;
+
+ /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via
+ * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c).
+ *
+     * If any of the lrm_rsc_op entries lacks the on_node attribute, the
+     * caller must explicitly indicate whether the two operations are on the
+     * same node.
+ */
+ if (a_node == NULL || b_node == NULL) {
+ same_node = same_node_default;
+
+ } else {
+ same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
+ }
+
+ if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
+        /* We have duplicate lrm_rsc_op entries in the status section, which
+         * is unlikely to be a good thing. We can handle it easily enough,
+         * but we need to get to the bottom of why it's happening.
+         */
+ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
+ sort_return(0, "duplicate");
+ }
+
+ crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
+ crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
+
+ if (a_call_id == -1 && b_call_id == -1) {
+ /* both are pending ops so it doesn't matter since
+ * stops are never pending
+ */
+ sort_return(0, "pending");
+
+ } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
+ sort_return(-1, "call id");
+
+ } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
+ sort_return(1, "call id");
+
+ } else if (a_call_id >= 0 && b_call_id >= 0
+ && (!same_node || a_call_id == b_call_id)) {
+ /*
+ * The op and last_failed_op are the same
+ * Order on last-rc-change
+ */
+ time_t last_a = -1;
+ time_t last_b = -1;
+
+ crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
+ crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
+
+ crm_trace("rc-change: %lld vs %lld",
+ (long long) last_a, (long long) last_b);
+ if (last_a >= 0 && last_a < last_b) {
+ sort_return(-1, "rc-change");
+
+ } else if (last_b >= 0 && last_a > last_b) {
+ sort_return(1, "rc-change");
+ }
+ sort_return(0, "rc-change");
+
+ } else {
+ /* One of the inputs is a pending operation
+ * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
+ */
+
+ int a_id = -1;
+ int b_id = -1;
+
+ const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
+ const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
+
+ CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
+ if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
+ NULL)) {
+ sort_return(0, "bad magic a");
+ }
+ if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
+ NULL)) {
+ sort_return(0, "bad magic b");
+ }
+ /* try to determine the relative age of the operation...
+ * some pending operations (e.g. a start) may have been superseded
+ * by a subsequent stop
+ *
+ * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
+ */
+ if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
+            /*
+             * Some of the logic in here may be redundant.
+             *
+             * If the UUID from the transition engine doesn't match, then one
+             * of the operations had better be pending. Pending operations
+             * don't survive between elections and joins because we query the
+             * LRM directly.
+             */
+
+ if (b_call_id == -1) {
+ sort_return(-1, "transition + call");
+
+ } else if (a_call_id == -1) {
+ sort_return(1, "transition + call");
+ }
+
+ } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
+ sort_return(-1, "transition");
+
+ } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
+ sort_return(1, "transition");
+ }
+ }
+
+ /* we should never end up here */
+ CRM_CHECK(FALSE, sort_return(0, "default"));
+}
+
+gint
+sort_op_by_callid(gconstpointer a, gconstpointer b)
+{
+ const xmlNode *xml_a = a;
+ const xmlNode *xml_b = b;
+
+ return pe__is_newer_op(xml_a, xml_b, true);
+}
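+
+/* Usage sketch (illustrative only): order a list of lrm_rsc_op history
+ * entries from oldest to newest:
+ *
+ *   op_list = g_list_sort(op_list, sort_op_by_callid);
+ */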
+
+/*!
+ * \internal
+ * \brief Create a new pseudo-action for a resource
+ *
+ * \param[in,out] rsc Resource to create action for
+ * \param[in] task Action name
+ * \param[in] optional Whether action should be considered optional
+ * \param[in]      runnable        Whether action should be considered runnable
+ *
+ * \return New action object corresponding to arguments
+ */
+pe_action_t *
+pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
+ bool runnable)
+{
+ pe_action_t *action = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (task != NULL));
+
+ action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
+ optional, TRUE, rsc->cluster);
+ pe__set_action_flags(action, pe_action_pseudo);
+ if (runnable) {
+ pe__set_action_flags(action, pe_action_runnable);
+ }
+ return action;
+}
+
+/*!
+ * \internal
+ * \brief Add the expected result to an action
+ *
+ * \param[in,out] action Action to add expected result to
+ * \param[in] expected_result Expected result to add
+ *
+ * \note This is more efficient than calling add_hash_param().
+ */
+void
+pe__add_action_expected_result(pe_action_t *action, int expected_result)
+{
+ char *name = NULL;
+
+ CRM_ASSERT((action != NULL) && (action->meta != NULL));
+
+ name = strdup(XML_ATTR_TE_TARGET_RC);
+ CRM_ASSERT (name != NULL);
+
+ g_hash_table_insert(action->meta, name, pcmk__itoa(expected_result));
+}
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
new file mode 100644
index 0000000..b8047da
--- /dev/null
+++ b/lib/pengine/pe_digest.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright 2004-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+#include <stdbool.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h>
+#include <crm/pengine/internal.h>
+#include "pe_status_private.h"
+
+extern bool pcmk__is_daemon;
+
+/*!
+ * \internal
+ * \brief Free an operation digest cache entry
+ *
+ * \param[in,out] ptr Pointer to cache entry to free
+ *
+ * \note The argument is a gpointer so this can be used as a hash table
+ * free function.
+ */
+void
+pe__free_digests(gpointer ptr)
+{
+ op_digest_cache_t *data = ptr;
+
+ if (data != NULL) {
+ free_xml(data->params_all);
+ free_xml(data->params_secure);
+ free_xml(data->params_restart);
+
+ free(data->digest_all_calc);
+ free(data->digest_restart_calc);
+ free(data->digest_secure_calc);
+
+ free(data);
+ }
+}
+
+// Return true if XML attribute name is not a substring of a given string
+static bool
+attr_not_in_string(xmlAttrPtr a, void *user_data)
+{
+ bool filter = false;
+ char *name = crm_strdup_printf(" %s ", (const char *) a->name);
+
+ if (strstr((const char *) user_data, name) == NULL) {
+ crm_trace("Filtering %s (not found in '%s')",
+ (const char *) a->name, (const char *) user_data);
+ filter = true;
+ }
+ free(name);
+ return filter;
+}
+
+// Return true if XML attribute name is a substring of a given string
+static bool
+attr_in_string(xmlAttrPtr a, void *user_data)
+{
+ bool filter = false;
+ char *name = crm_strdup_printf(" %s ", (const char *) a->name);
+
+ if (strstr((const char *) user_data, name) != NULL) {
+ crm_trace("Filtering %s (found in '%s')",
+ (const char *) a->name, (const char *) user_data);
+ filter = true;
+ }
+ free(name);
+ return filter;
+}
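+
+/* Editorial note: both helpers above expect user_data to be a
+ * space-delimited name list with leading and trailing spaces (for example
+ * " passwd password user "), so wrapping each attribute name in spaces
+ * turns the substring check into an exact-token match.
+ */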
+
+/*!
+ * \internal
+ * \brief Add digest of all parameters to a digest cache entry
+ *
+ * \param[out] data Digest cache entry to modify
+ * \param[in,out] rsc Resource that action was for
+ * \param[in] node Node action was performed on
+ * \param[in] params Resource parameters evaluated for node
+ * \param[in] task Name of action performed
+ * \param[in,out] interval_ms Action's interval (will be reset if in overrides)
+ * \param[in] xml_op Unused
+ * \param[in] op_version CRM feature set to use for digest calculation
+ * \param[in] overrides Key/value table to override resource parameters
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
+ const pe_node_t *node, GHashTable *params,
+ const char *task, guint *interval_ms,
+ const xmlNode *xml_op, const char *op_version,
+ GHashTable *overrides, pe_working_set_t *data_set)
+{
+ pe_action_t *action = NULL;
+
+ data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
+
+ /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
+ * that themselves are Pacemaker Remote nodes
+ */
+ (void) pe__add_bundle_remote_name(rsc, data_set, data->params_all,
+ XML_RSC_ATTR_REMOTE_RA_ADDR);
+
+ // If interval was overridden, reset it
+ if (overrides != NULL) {
+ const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_"
+ XML_LRM_ATTR_INTERVAL);
+
+ if (interval_s != NULL) {
+ long long value_ll;
+
+ if ((pcmk__scan_ll(interval_s, &value_ll, 0LL) == pcmk_rc_ok)
+ && (value_ll >= 0) && (value_ll <= G_MAXUINT)) {
+ *interval_ms = (guint) value_ll;
+ }
+ }
+ }
+
+ action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms),
+ task, node, TRUE, FALSE, data_set);
+ if (overrides != NULL) {
+ g_hash_table_foreach(overrides, hash2field, data->params_all);
+ }
+ g_hash_table_foreach(params, hash2field, data->params_all);
+ g_hash_table_foreach(action->extra, hash2field, data->params_all);
+ g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
+
+ pcmk__filter_op_for_digest(data->params_all);
+
+    /* For a non-recurring operation with extra parameters configured,
+     * enforce a restart rather than a reload-agent whenever the main digest
+     * doesn't match, even if the restart digest matches. This ensures that
+     * any changes to the extra parameters are applied for this specific
+     * operation, and that the digests calculated for the resulting
+     * lrm_rsc_op will be correct. Mark the implied result
+     * RSC_DIGEST_RESTART for the case where the main digest doesn't match.
+     */
+ if (*interval_ms == 0
+ && g_hash_table_size(action->extra) > 0) {
+ data->rc = RSC_DIGEST_RESTART;
+ }
+
+ pe_free_action(action);
+
+ data->digest_all_calc = calculate_operation_digest(data->params_all,
+ op_version);
+}
+
+// Return true if XML attribute name is a Pacemaker-defined fencing parameter
+static bool
+is_fence_param(xmlAttrPtr attr, void *user_data)
+{
+ return pcmk_stonith_param((const char *) attr->name);
+}
+
+/*!
+ * \internal
+ * \brief Add secure digest to a digest cache entry
+ *
+ * \param[out] data Digest cache entry to modify
+ * \param[in] rsc Resource that action was for
+ * \param[in] params Resource parameters evaluated for node
+ * \param[in] xml_op XML of operation in CIB status (if available)
+ * \param[in] op_version CRM feature set to use for digest calculation
+ * \param[in] overrides Key/value hash table to override resource parameters
+ */
+static void
+calculate_secure_digest(op_digest_cache_t *data, const pe_resource_t *rsc,
+ GHashTable *params, const xmlNode *xml_op,
+ const char *op_version, GHashTable *overrides)
+{
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *secure_list = NULL;
+ bool old_version = (compare_version(op_version, "3.16.0") < 0);
+
+ if (xml_op == NULL) {
+ secure_list = " passwd password user ";
+ } else {
+ secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
+ }
+
+ if (old_version) {
+ data->params_secure = create_xml_node(NULL, XML_TAG_PARAMS);
+ if (overrides != NULL) {
+ g_hash_table_foreach(overrides, hash2field, data->params_secure);
+ }
+
+ g_hash_table_foreach(params, hash2field, data->params_secure);
+
+ } else {
+ // Start with a copy of all parameters
+ data->params_secure = copy_xml(data->params_all);
+ }
+
+ if (secure_list != NULL) {
+ pcmk__xe_remove_matching_attrs(data->params_secure, attr_in_string,
+ (void *) secure_list);
+ }
+ if (old_version
+ && pcmk_is_set(pcmk_get_ra_caps(class),
+ pcmk_ra_cap_fence_params)) {
+        /* For stonith resources, Pacemaker adds special parameters, but
+         * these are not listed in fence agent meta-data, so an older DC will
+         * not hash them. That means we have to filter them out before
+         * calculating our hash for comparison.
+         */
+ pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param,
+ NULL);
+ }
+ pcmk__filter_op_for_digest(data->params_secure);
+
+    /* CRM_meta_timeout *should* be part of a digest for recurring operations.
+     * However, an older DC does not add the timeout to secure digests,
+     * because it includes only parameters declared by the resource agent.
+     * Remove any timeout that made it this far, to match.
+     */
+ if (old_version) {
+ xml_remove_prop(data->params_secure, CRM_META "_" XML_ATTR_TIMEOUT);
+ }
+
+ data->digest_secure_calc = calculate_operation_digest(data->params_secure,
+ op_version);
+}
+
+/*!
+ * \internal
+ * \brief Add restart digest to a digest cache entry
+ *
+ * \param[out] data Digest cache entry to modify
+ * \param[in] xml_op XML of operation in CIB status (if available)
+ * \param[in] op_version CRM feature set to use for digest calculation
+ *
+ * \note This function doesn't need to handle overrides because it starts with
+ * data->params_all, which already has overrides applied.
+ */
+static void
+calculate_restart_digest(op_digest_cache_t *data, const xmlNode *xml_op,
+ const char *op_version)
+{
+ const char *value = NULL;
+
+ // We must have XML of resource operation history
+ if (xml_op == NULL) {
+ return;
+ }
+
+ // And the history must have a restart digest to compare against
+ if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) == NULL) {
+ return;
+ }
+
+ // Start with a copy of all parameters
+ data->params_restart = copy_xml(data->params_all);
+
+ // Then filter out reloadable parameters, if any
+ value = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
+ if (value != NULL) {
+ pcmk__xe_remove_matching_attrs(data->params_restart, attr_not_in_string,
+ (void *) value);
+ }
+
+ value = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
+ data->digest_restart_calc = calculate_operation_digest(data->params_restart,
+ value);
+}
+
+/*!
+ * \internal
+ * \brief Create a new digest cache entry with calculated digests
+ *
+ * \param[in,out] rsc Resource that action was for
+ * \param[in] task Name of action performed
+ * \param[in,out] interval_ms Action's interval (will be reset if in overrides)
+ * \param[in] node Node action was performed on
+ * \param[in] xml_op XML of operation in CIB status (if available)
+ * \param[in] overrides Key/value table to override resource parameters
+ * \param[in] calc_secure Whether to calculate secure digest
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Pointer to new digest cache entry (or NULL on memory error)
+ * \note It is the caller's responsibility to free the result using
+ * pe__free_digests().
+ */
+op_digest_cache_t *
+pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
+ const pe_node_t *node, const xmlNode *xml_op,
+ GHashTable *overrides, bool calc_secure,
+ pe_working_set_t *data_set)
+{
+ op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t));
+ const char *op_version = NULL;
+ GHashTable *params = NULL;
+
+ if (data == NULL) {
+ return NULL;
+ }
+
+ data->rc = RSC_DIGEST_MATCH;
+
+ if (xml_op != NULL) {
+ op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
+ }
+
+ if (op_version == NULL && data_set != NULL && data_set->input != NULL) {
+ op_version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ }
+
+ if (op_version == NULL) {
+ op_version = CRM_FEATURE_SET;
+ }
+
+ params = pe_rsc_params(rsc, node, data_set);
+ calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
+ op_version, overrides, data_set);
+ if (calc_secure) {
+ calculate_secure_digest(data, rsc, params, xml_op, op_version,
+ overrides);
+ }
+ calculate_restart_digest(data, xml_op, op_version);
+ return data;
+}
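+
+/* Illustrative usage sketch (not part of the library): how a caller might
+ * calculate and free digests for a hypothetical resource "rsc" and node
+ * "node" taken from the working set "data_set".
+ *
+ * guint interval_ms = 0;
+ * op_digest_cache_t *digests =
+ * pe__calculate_digests(rsc, RSC_STATUS, &interval_ms, node,
+ * NULL, NULL, false, data_set);
+ *
+ * if (digests != NULL) {
+ * // ... compare digests->digest_all_calc with the stored digest ...
+ * pe__free_digests(digests);
+ * }
+ */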
+
+/*!
+ * \internal
+ * \brief Calculate action digests and store in node's digest cache
+ *
+ * \param[in,out] rsc Resource that action was for
+ * \param[in] task Name of action performed
+ * \param[in] interval_ms Action's interval
+ * \param[in,out] node Node action was performed on
+ * \param[in] xml_op XML of operation in CIB status (if available)
+ * \param[in] calc_secure Whether to calculate secure digest
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Pointer to node's digest cache entry
+ */
+static op_digest_cache_t *
+rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
+ pe_node_t *node, const xmlNode *xml_op,
+ bool calc_secure, pe_working_set_t *data_set)
+{
+ op_digest_cache_t *data = NULL;
+ char *key = pcmk__op_key(rsc->id, task, interval_ms);
+
+ data = g_hash_table_lookup(node->details->digest_cache, key);
+ if (data == NULL) {
+ data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
+ NULL, calc_secure, data_set);
+ CRM_ASSERT(data != NULL);
+ g_hash_table_insert(node->details->digest_cache, strdup(key), data);
+ }
+ free(key);
+ return data;
+}
+
+/*!
+ * \internal
+ * \brief Calculate operation digests and compare against an XML history entry
+ *
+ * \param[in,out] rsc Resource to check
+ * \param[in] xml_op Resource history XML
+ * \param[in,out] node Node to use for digest calculation
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Pointer to node's digest cache entry, with comparison result set
+ */
+op_digest_cache_t *
+rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
+ pe_node_t *node, pe_working_set_t *data_set)
+{
+ op_digest_cache_t *data = NULL;
+ guint interval_ms = 0;
+
+ const char *op_version;
+ const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
+ const char *digest_all;
+ const char *digest_restart;
+
+ CRM_ASSERT(node != NULL);
+
+ op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
+ digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
+ digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
+
+ crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
+ data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
+ pcmk_is_set(data_set->flags, pe_flag_sanitized),
+ data_set);
+
+ if (digest_restart && data->digest_restart_calc
+ && (strcmp(data->digest_restart_calc, digest_restart) != 0)) {
+ pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
+ "changed: hash was %s vs. now %s (restart:%s) %s",
+ interval_ms, task, rsc->id, pe__node_name(node),
+ pcmk__s(digest_restart, "missing"),
+ data->digest_restart_calc,
+ op_version,
+ crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
+ data->rc = RSC_DIGEST_RESTART;
+
+ } else if (digest_all == NULL) {
+ /* it is unknown what the previous op digest was */
+ data->rc = RSC_DIGEST_UNKNOWN;
+
+ } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
+ /* For a non-recurring operation with extra parameters configured, if
+ * the main digest doesn't match, enforce a restart rather than a
+ * reload-agent, even when the restart digest matches. This ensures that
+ * any changes to the extra parameters are applied for this specific
+ * operation, and that the digests calculated for the resulting
+ * lrm_rsc_op will be correct.
+ * Preserve the implied rc RSC_DIGEST_RESTART for the case where the
+ * main digest doesn't match.
+ */
+ if (interval_ms == 0
+ && data->rc == RSC_DIGEST_RESTART) {
+ pe_rsc_info(rsc, "Parameters containing extra ones to %ums-interval"
+ " %s action for %s on %s "
+ "changed: hash was %s vs. now %s (restart:%s) %s",
+ interval_ms, task, rsc->id, pe__node_name(node),
+ pcmk__s(digest_all, "missing"), data->digest_all_calc,
+ op_version,
+ crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
+
+ } else {
+ pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
+ "changed: hash was %s vs. now %s (%s:%s) %s",
+ interval_ms, task, rsc->id, pe__node_name(node),
+ pcmk__s(digest_all, "missing"), data->digest_all_calc,
+ (interval_ms > 0)? "reschedule" : "reload",
+ op_version,
+ crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
+ data->rc = RSC_DIGEST_ALL;
+ }
+
+ } else {
+ data->rc = RSC_DIGEST_MATCH;
+ }
+ return data;
+}
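+
+/* Illustrative sketch (assuming "rsc", "xml_op", "node", and "data_set" as
+ * in the function above): how a caller might branch on the comparison
+ * result.
+ *
+ * op_digest_cache_t *digests = rsc_action_digest_cmp(rsc, xml_op, node,
+ * data_set);
+ *
+ * if (digests->rc == RSC_DIGEST_RESTART) {
+ * // parameters changed in a way that requires a full restart
+ * } else if (digests->rc == RSC_DIGEST_ALL) {
+ * // parameters changed; a reload or reschedule may suffice
+ * } else if (digests->rc == RSC_DIGEST_UNKNOWN) {
+ * // no stored digest to compare against
+ * } // else RSC_DIGEST_MATCH: configuration unchanged
+ */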
+
+/*!
+ * \internal
+ * \brief Create an unfencing summary for use in a special node attribute
+ *
+ * Create a string combining a fence device's resource ID, agent type, and
+ * parameter digest (whether for all parameters or just non-private parameters).
+ * This can be stored in a special node attribute, allowing us to detect changes
+ * in either the agent type or parameters, to know whether unfencing must be
+ * redone or can be safely skipped when the device's history is cleaned.
+ *
+ * \param[in] rsc_id Fence device resource ID
+ * \param[in] agent_type Fence device agent
+ * \param[in] param_digest Fence device parameter digest
+ *
+ * \return Newly allocated string with unfencing digest
+ * \note The caller is responsible for freeing the result.
+ */
+static inline char *
+create_unfencing_summary(const char *rsc_id, const char *agent_type,
+ const char *param_digest)
+{
+ return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
+}
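+
+/* For example (hypothetical values), a fence device "fence1" using agent
+ * "fence_ipmilan" with parameter digest "0123abcd" yields the summary
+ * "fence1:fence_ipmilan:0123abcd".
+ */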
+
+/*!
+ * \internal
+ * \brief Check whether a node can skip unfencing
+ *
+ * Check whether a fence device's current definition matches a node's
+ * stored summary of when it was last unfenced by the device.
+ *
+ * \param[in] rsc_id Fence device's resource ID
+ * \param[in] agent Fence device's agent type
+ * \param[in] digest_calc Fence device's current parameter digest
+ * \param[in] node_summary Value of node's special unfencing node attribute
+ * (a comma-separated list of unfencing summaries for
+ * all devices that have unfenced this node)
+ *
+ * \return TRUE if digest matches, FALSE otherwise
+ */
+static bool
+unfencing_digest_matches(const char *rsc_id, const char *agent,
+ const char *digest_calc, const char *node_summary)
+{
+ bool matches = FALSE;
+
+ if (rsc_id && agent && digest_calc && node_summary) {
+ char *search_secure = create_unfencing_summary(rsc_id, agent,
+ digest_calc);
+
+ /* The digest was calculated including the device ID and agent,
+ * so there is no risk of collision using strstr().
+ */
+ matches = (strstr(node_summary, search_secure) != NULL);
+ crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
+ search_secure, matches? "" : "not ", node_summary);
+ free(search_secure);
+ }
+ return matches;
+}
+
+/* Magic string to use as the action name for digest cache entries used for
+ * unfencing checks. This is not a real action name (the real unfencing
+ * action is "on"), so pcmk__check_action_config() won't confuse these
+ * entries with real actions.
+ */
+#define STONITH_DIGEST_TASK "stonith-on"
+
+/*!
+ * \internal
+ * \brief Calculate fence device digests and digest comparison result
+ *
+ * \param[in,out] rsc Fence device resource
+ * \param[in] agent Fence device's agent type
+ * \param[in,out] node Node with digest cache to use
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Node's digest cache entry
+ */
+op_digest_cache_t *
+pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
+ pe_node_t *node, pe_working_set_t *data_set)
+{
+ const char *node_summary = NULL;
+
+ // Calculate device's current parameter digests
+ op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
+ node, NULL, TRUE, data_set);
+
+ // Check whether node has special unfencing summary node attribute
+ node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
+ if (node_summary == NULL) {
+ data->rc = RSC_DIGEST_UNKNOWN;
+ return data;
+ }
+
+ // Check whether full parameter digest matches
+ if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
+ node_summary)) {
+ data->rc = RSC_DIGEST_MATCH;
+ return data;
+ }
+
+ // Check whether secure parameter digest matches
+ node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
+ if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
+ node_summary)) {
+ data->rc = RSC_DIGEST_MATCH;
+ if (!pcmk__is_daemon && data_set->priv != NULL) {
+ pcmk__output_t *out = data_set->priv;
+ out->info(out, "Only 'private' parameters to %s "
+ "for unfencing %s changed", rsc->id,
+ pe__node_name(node));
+ }
+ return data;
+ }
+
+ // Parameters don't match
+ data->rc = RSC_DIGEST_ALL;
+ if (pcmk_is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) {
+ if (data_set->priv != NULL) {
+ pcmk__output_t *out = data_set->priv;
+ char *digest = create_unfencing_summary(rsc->id, agent,
+ data->digest_secure_calc);
+
+ out->info(out, "Parameters to %s for unfencing "
+ "%s changed, try '%s'", rsc->id,
+ pe__node_name(node), digest);
+ free(digest);
+ } else if (!pcmk__is_daemon) {
+ char *digest = create_unfencing_summary(rsc->id, agent,
+ data->digest_secure_calc);
+
+ printf("Parameters to %s for unfencing %s changed, try '%s'\n",
+ rsc->id, pe__node_name(node), digest);
+ free(digest);
+ }
+ }
+ return data;
+}
diff --git a/lib/pengine/pe_health.c b/lib/pengine/pe_health.c
new file mode 100644
index 0000000..6419fdf
--- /dev/null
+++ b/lib/pengine/pe_health.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
+#include "pe_status_private.h"
+
+/*!
+ * \internal
+ * \brief Set the node health values to use for "red", "yellow", and "green"
+ *
+ * \param[in,out] data_set Cluster working set
+ */
+void
+pe__unpack_node_health_scores(pe_working_set_t *data_set)
+{
+ switch (pe__health_strategy(data_set)) {
+ case pcmk__health_strategy_none:
+ pcmk__score_red = 0;
+ pcmk__score_yellow = 0;
+ pcmk__score_green = 0;
+ break;
+
+ case pcmk__health_strategy_no_red:
+ pcmk__score_red = -INFINITY;
+ pcmk__score_yellow = 0;
+ pcmk__score_green = 0;
+ break;
+
+ case pcmk__health_strategy_only_green:
+ pcmk__score_red = -INFINITY;
+ pcmk__score_yellow = -INFINITY;
+ pcmk__score_green = 0;
+ break;
+
+ default: // progressive or custom
+ pcmk__score_red = pe__health_score(PCMK__OPT_NODE_HEALTH_RED,
+ data_set);
+ pcmk__score_green = pe__health_score(PCMK__OPT_NODE_HEALTH_GREEN,
+ data_set);
+ pcmk__score_yellow = pe__health_score(PCMK__OPT_NODE_HEALTH_YELLOW,
+ data_set);
+ break;
+ }
+
+ if ((pcmk__score_red != 0) || (pcmk__score_yellow != 0)
+ || (pcmk__score_green != 0)) {
+ crm_debug("Values of node health scores: "
+ PCMK__VALUE_RED "=%d "
+ PCMK__VALUE_YELLOW "=%d "
+ PCMK__VALUE_GREEN "=%d",
+ pcmk__score_red, pcmk__score_yellow, pcmk__score_green);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Add node attribute value to an integer, if it is a health attribute
+ *
+ * \param[in] key Name of node attribute
+ * \param[in] value String value of node attribute
+ * \param[in,out] user_data Address of integer to which \p value should be
+ * added if \p key is a node health attribute
+ */
+static void
+add_node_health_value(gpointer key, gpointer value, gpointer user_data)
+{
+ if (pcmk__starts_with((const char *) key, "#health")) {
+ int score = char2score((const char *) value);
+ int *health = (int *) user_data;
+
+ *health = pcmk__add_scores(score, *health);
+ crm_trace("Combined '%s' into node health score (now %s)",
+ (const char *) value, pcmk_readable_score(*health));
+ }
+}
+
+/*!
+ * \internal
+ * \brief Sum a node's health attribute scores
+ *
+ * \param[in] node Node whose health attributes should be added
+ * \param[in] base_health Add this number to the total
+ *
+ * \return Sum of all health attribute scores of \p node plus \p base_health
+ */
+int
+pe__sum_node_health_scores(const pe_node_t *node, int base_health)
+{
+ CRM_ASSERT(node != NULL);
+ g_hash_table_foreach(node->details->attrs, add_node_health_value,
+ &base_health);
+ return base_health;
+}
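+
+/* Worked example (hypothetical attribute values): with base_health 0, a node
+ * carrying #health-disk="20" and #health-net="-10" sums to 10. Textual
+ * values such as "red" are converted by char2score() using the
+ * strategy-dependent pcmk__score_* values before being added.
+ */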
+
+/*!
+ * \internal
+ * \brief Check the general health status for a node
+ *
+ * \param[in,out] node Node to check
+ *
+ * \return A negative value if any health attribute for \p node is red,
+ * otherwise 0 if any attribute is yellow, otherwise a positive value.
+ */
+int
+pe__node_health(pe_node_t *node)
+{
+ GHashTableIter iter;
+ const char *name = NULL;
+ const char *value = NULL;
+ enum pcmk__health_strategy strategy;
+ int score = 0;
+ int rc = 1;
+
+ CRM_ASSERT(node != NULL);
+
+ strategy = pe__health_strategy(node->details->data_set);
+ if (strategy == pcmk__health_strategy_none) {
+ return rc;
+ }
+
+ g_hash_table_iter_init(&iter, node->details->attrs);
+ while (g_hash_table_iter_next(&iter, (gpointer *) &name,
+ (gpointer *) &value)) {
+ if (pcmk__starts_with(name, "#health")) {
+ /* It's possible that pcmk__score_red equals pcmk__score_yellow,
+ * or pcmk__score_yellow equals pcmk__score_green, so check the
+ * textual value first to be able to distinguish those.
+ */
+ if (pcmk__str_eq(value, PCMK__VALUE_RED, pcmk__str_casei)) {
+ return -1;
+ } else if (pcmk__str_eq(value, PCMK__VALUE_YELLOW,
+ pcmk__str_casei)) {
+ rc = 0;
+ continue;
+ }
+
+ // The value is an integer, so compare numerically
+ score = char2score(value);
+ if (score <= pcmk__score_red) {
+ return -1;
+ } else if ((score <= pcmk__score_yellow)
+ && (pcmk__score_yellow != pcmk__score_green)) {
+ rc = 0;
+ }
+ }
+ }
+ return rc;
+}
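+
+/* Worked example (hypothetical attributes, assuming a progressive strategy
+ * with node-health-red=-200, node-health-yellow=-50, node-health-green=0):
+ * a node with #health-net="-300" returns -1 (at or below red), one with
+ * #health-net="-100" returns 0 (at or below yellow), and one with
+ * #health-net="0" returns 1.
+ */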
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
new file mode 100644
index 0000000..7ed490f
--- /dev/null
+++ b/lib/pengine/pe_notif.c
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <crm/msg_xml.h>
+#include <pacemaker-internal.h>
+
+#include "pe_status_private.h"
+
+typedef struct notify_entry_s {
+ const pe_resource_t *rsc;
+ const pe_node_t *node;
+} notify_entry_t;
+
+/*!
+ * \internal
+ * \brief Compare two notification entries
+ *
+ * Compare two notification entries, where the one with the alphabetically
+ * first resource name (or if equal, node ID) sorts as first, with NULL
+ * sorting after non-NULL.
+ *
+ * \param[in] a First notification entry to compare
+ * \param[in] b Second notification entry to compare
+ *
+ * \return A negative value if \p a sorts first, a positive value if \p b
+ * sorts first, otherwise 0
+ */
+static gint
+compare_notify_entries(gconstpointer a, gconstpointer b)
+{
+ int tmp;
+ const notify_entry_t *entry_a = a;
+ const notify_entry_t *entry_b = b;
+
+ // NULL a or b is not actually possible
+ if ((entry_a == NULL) && (entry_b == NULL)) {
+ return 0;
+ }
+ if (entry_a == NULL) {
+ return 1;
+ }
+ if (entry_b == NULL) {
+ return -1;
+ }
+
+ // NULL resources sort last
+ if ((entry_a->rsc == NULL) && (entry_b->rsc == NULL)) {
+ return 0;
+ }
+ if (entry_a->rsc == NULL) {
+ return 1;
+ }
+ if (entry_b->rsc == NULL) {
+ return -1;
+ }
+
+ // Compare resource names
+ tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id);
+ if (tmp != 0) {
+ return tmp;
+ }
+
+ // Otherwise NULL nodes sort last
+ if ((entry_a->node == NULL) && (entry_b->node == NULL)) {
+ return 0;
+ }
+ if (entry_a->node == NULL) {
+ return 1;
+ }
+ if (entry_b->node == NULL) {
+ return -1;
+ }
+
+ // Finally, compare node IDs
+ return strcmp(entry_a->node->details->id, entry_b->node->details->id);
+}
+
+/*!
+ * \internal
+ * \brief Duplicate a notification entry
+ *
+ * \param[in] entry Entry to duplicate
+ *
+ * \return Newly allocated duplicate of \p entry
+ * \note It is the caller's responsibility to free the return value.
+ */
+static notify_entry_t *
+dup_notify_entry(const notify_entry_t *entry)
+{
+ notify_entry_t *dup = calloc(1, sizeof(notify_entry_t));
+
+ CRM_ASSERT(dup != NULL);
+ dup->rsc = entry->rsc;
+ dup->node = entry->node;
+ return dup;
+}
+
+/*!
+ * \internal
+ * \brief Given a list of nodes, create strings with node names
+ *
+ * \param[in] list List of nodes (as pe_node_t *)
+ * \param[out] all_node_names If not NULL, will be set to space-separated list
+ * of the names of all nodes in \p list
+ * \param[out] host_node_names Same as \p all_node_names, except active
+ * guest nodes will list the name of their host
+ *
+ * \note The caller is responsible for freeing the output argument values using
+ * \p g_string_free().
+ */
+static void
+get_node_names(const GList *list, GString **all_node_names,
+ GString **host_node_names)
+{
+ if (all_node_names != NULL) {
+ *all_node_names = NULL;
+ }
+ if (host_node_names != NULL) {
+ *host_node_names = NULL;
+ }
+
+ for (const GList *iter = list; iter != NULL; iter = iter->next) {
+ const pe_node_t *node = (const pe_node_t *) iter->data;
+
+ if (node->details->uname == NULL) {
+ continue;
+ }
+
+ // Always add to list of all node names
+ if (all_node_names != NULL) {
+ pcmk__add_word(all_node_names, 1024, node->details->uname);
+ }
+
+ // Add to host node name list if appropriate
+ if (host_node_names != NULL) {
+ if (pe__is_guest_node(node)
+ && (node->details->remote_rsc->container->running_on != NULL)) {
+ node = pe__current_node(node->details->remote_rsc->container);
+ if (node->details->uname == NULL) {
+ continue;
+ }
+ }
+ pcmk__add_word(host_node_names, 1024, node->details->uname);
+ }
+ }
+
+ if ((all_node_names != NULL) && (*all_node_names == NULL)) {
+ *all_node_names = g_string_new(" ");
+ }
+ if ((host_node_names != NULL) && (*host_node_names == NULL)) {
+ *host_node_names = g_string_new(" ");
+ }
+}
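+
+/* Illustrative sketch (hypothetical node list "nodes"): callers own the
+ * returned strings and must free them.
+ *
+ * GString *all = NULL;
+ *
+ * get_node_names(nodes, &all, NULL);
+ * crm_trace("Nodes: %s", all->str);
+ * g_string_free(all, TRUE);
+ */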
+
+/*!
+ * \internal
+ * \brief Create strings of instance and node names from notification entries
+ *
+ * \param[in,out] list List of notification entries (will be sorted here)
+ * \param[out] rsc_names If not NULL, will be set to space-separated list
+ * of clone instances from \p list
+ * \param[out] node_names If not NULL, will be set to space-separated list
+ * of node names from \p list
+ *
+ * \return (Possibly new) head of sorted \p list
+ * \note The caller is responsible for freeing the output argument values using
+ * \p g_list_free_full() and \p g_string_free().
+ */
+static GList *
+notify_entries_to_strings(GList *list, GString **rsc_names,
+ GString **node_names)
+{
+ const char *last_rsc_id = NULL;
+
+ // Initialize output lists to NULL
+ if (rsc_names != NULL) {
+ *rsc_names = NULL;
+ }
+ if (node_names != NULL) {
+ *node_names = NULL;
+ }
+
+ // Sort input list for user-friendliness (and ease of filtering duplicates)
+ list = g_list_sort(list, compare_notify_entries);
+
+ for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
+ notify_entry_t *entry = (notify_entry_t *) gIter->data;
+
+ // Entry must have a resource (with ID)
+ CRM_LOG_ASSERT((entry != NULL) && (entry->rsc != NULL)
+ && (entry->rsc->id != NULL));
+ if ((entry == NULL) || (entry->rsc == NULL)
+ || (entry->rsc->id == NULL)) {
+ continue;
+ }
+
+ // Entry must have a node unless listing inactive resources
+ CRM_LOG_ASSERT((node_names == NULL) || (entry->node != NULL));
+ if ((node_names != NULL) && (entry->node == NULL)) {
+ continue;
+ }
+
+ // Don't add duplicates of a particular clone instance
+ if (pcmk__str_eq(entry->rsc->id, last_rsc_id, pcmk__str_none)) {
+ continue;
+ }
+ last_rsc_id = entry->rsc->id;
+
+ if (rsc_names != NULL) {
+ pcmk__add_word(rsc_names, 1024, entry->rsc->id);
+ }
+ if ((node_names != NULL) && (entry->node->details->uname != NULL)) {
+ pcmk__add_word(node_names, 1024, entry->node->details->uname);
+ }
+ }
+
+ // If there are no entries, return "empty" lists
+ if ((rsc_names != NULL) && (*rsc_names == NULL)) {
+ *rsc_names = g_string_new(" ");
+ }
+ if ((node_names != NULL) && (*node_names == NULL)) {
+ *node_names = g_string_new(" ");
+ }
+
+ return list;
+}
+
+/*!
+ * \internal
+ * \brief Copy a meta-attribute into a notify action
+ *
+ * \param[in] key Name of meta-attribute to copy
+ * \param[in] value Value of meta-attribute to copy
+ * \param[in,out] user_data Notify action to copy into
+ */
+static void
+copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
+{
+ pe_action_t *notify = (pe_action_t *) user_data;
+
+ /* Any existing meta-attributes (for example, the action timeout) are for
+ * the notify action itself, so don't override those.
+ */
+ if (g_hash_table_lookup(notify->meta, (const char *) key) != NULL) {
+ return;
+ }
+
+ g_hash_table_insert(notify->meta, strdup((const char *) key),
+ strdup((const char *) value));
+}
+
+static void
+add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
+{
+ for (const GSList *item = n_data->keys; item; item = item->next) {
+ const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
+
+ add_hash_param(action->meta, nvpair->name, nvpair->value);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create a new notify pseudo-action for a clone resource
+ *
+ * \param[in,out] rsc Clone resource that notification is for
+ * \param[in] action Action to use in notify action key
+ * \param[in] notif_action RSC_NOTIFY or RSC_NOTIFIED
+ * \param[in] notif_type "pre", "post", "confirmed-pre", "confirmed-post"
+ *
+ * \return Newly created notify pseudo-action
+ */
+static pe_action_t *
+new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
+ const char *notif_action, const char *notif_type)
+{
+ pe_action_t *notify = NULL;
+
+ notify = custom_action(rsc,
+ pcmk__notify_key(rsc->id, notif_type, action->task),
+ notif_action, NULL,
+ pcmk_is_set(action->flags, pe_action_optional),
+ TRUE, rsc->cluster);
+ pe__set_action_flags(notify, pe_action_pseudo);
+ add_hash_param(notify->meta, "notify_key_type", notif_type);
+ add_hash_param(notify->meta, "notify_key_operation", action->task);
+ return notify;
+}
+
+/*!
+ * \internal
+ * \brief Create a new notify action for a clone instance
+ *
+ * \param[in,out] rsc Clone instance that notification is for
+ * \param[in] node Node that notification is for
+ * \param[in,out] op Action that notification is for
+ * \param[in,out] notify_done Parent pseudo-action for notifications complete
+ * \param[in] n_data Notification values to add to action meta-data
+ *
+ * \return Newly created notify action
+ */
+static pe_action_t *
+new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
+ pe_action_t *notify_done, const notify_data_t *n_data)
+{
+ char *key = NULL;
+ pe_action_t *notify_action = NULL;
+ const char *value = NULL;
+ const char *task = NULL;
+ const char *skip_reason = NULL;
+
+ CRM_CHECK((rsc != NULL) && (node != NULL), return NULL);
+
+ // Ensure we have all the info we need
+ if (op == NULL) {
+ skip_reason = "no action";
+ } else if (notify_done == NULL) {
+ skip_reason = "no parent notification";
+ } else if (!node->details->online) {
+ skip_reason = "node offline";
+ } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ skip_reason = "original action not runnable";
+ }
+ if (skip_reason != NULL) {
+ pe_rsc_trace(rsc, "Skipping notify action for %s on %s: %s",
+ rsc->id, pe__node_name(node), skip_reason);
+ return NULL;
+ }
+
+ value = g_hash_table_lookup(op->meta, "notify_type"); // "pre" or "post"
+ task = g_hash_table_lookup(op->meta, "notify_operation"); // original action
+
+ pe_rsc_trace(rsc, "Creating notify action for %s on %s (%s-%s)",
+ rsc->id, pe__node_name(node), value, task);
+
+ // Create the notify action
+ key = pcmk__notify_key(rsc->id, value, task);
+ notify_action = custom_action(rsc, key, op->task, node,
+ pcmk_is_set(op->flags, pe_action_optional),
+ TRUE, rsc->cluster);
+
+ // Add meta-data to notify action
+ g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
+ add_notify_data_to_action_meta(n_data, notify_action);
+
+ // Order notify after original action and before parent notification
+ order_actions(op, notify_action, pe_order_optional);
+ order_actions(notify_action, notify_done, pe_order_optional);
+ return notify_action;
+}
+
+/*!
+ * \internal
+ * \brief Create a new "post-" notify action for a clone instance
+ *
+ * \param[in,out] rsc Clone instance that notification is for
+ * \param[in] node Node that notification is for
+ * \param[in,out] n_data Notification values to add to action meta-data
+ */
+static void
+new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
+ notify_data_t *n_data)
+{
+ pe_action_t *notify = NULL;
+
+ CRM_ASSERT(n_data != NULL);
+
+ // Create the "post-" notify action for specified instance
+ notify = new_notify_action(rsc, node, n_data->post, n_data->post_done,
+ n_data);
+ if (notify != NULL) {
+ notify->priority = INFINITY;
+ }
+
+ // Order recurring monitors after all "post-" notifications complete
+ if (n_data->post_done == NULL) {
+ return;
+ }
+ for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pe_action_t *mon = (pe_action_t *) iter->data;
+ const char *interval_ms_s = NULL;
+
+ interval_ms_s = g_hash_table_lookup(mon->meta,
+ XML_LRM_ATTR_INTERVAL_MS);
+ if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
+ || pcmk__str_eq(mon->task, RSC_CANCEL, pcmk__str_none)) {
+ continue; // Not a recurring monitor
+ }
+ order_actions(n_data->post_done, mon, pe_order_optional);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create and order notification pseudo-actions for a clone action
+ *
+ * In addition to the actual notify actions needed for each clone instance,
+ * clone notifications also require pseudo-actions to provide ordering points
+ * in the notification process. This creates the notification data, along with
+ * appropriate pseudo-actions and their orderings.
+ *
+ * For example, the ordering sequence for starting a clone is:
+ *
+ * "pre-" notify pseudo-action for clone
+ * -> "pre-" notify actions for each clone instance
+ * -> "pre-" notifications complete pseudo-action for clone
+ * -> start actions for each clone instance
+ * -> "started" pseudo-action for clone
+ * -> "post-" notify pseudo-action for clone
+ * -> "post-" notify actions for each clone instance
+ * -> "post-" notifications complete pseudo-action for clone
+ *
+ * \param[in,out] rsc Clone that notifications are for
+ * \param[in] task Name of action that notifications are for
+ * \param[in,out] action If not NULL, create a "pre-" pseudo-action ordered
+ * before a "pre-" complete pseudo-action, ordered
+ * before this action
+ * \param[in,out] complete If not NULL, create a "post-" pseudo-action ordered
+ * after this action, and a "post-" complete
+ * pseudo-action ordered after that
+ *
+ * \return Newly created notification data
+ */
+notify_data_t *
+pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
+ pe_action_t *action, pe_action_t *complete)
+{
+ notify_data_t *n_data = NULL;
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ return NULL;
+ }
+
+ n_data = calloc(1, sizeof(notify_data_t));
+ CRM_ASSERT(n_data != NULL);
+
+ n_data->action = task;
+
+ if (action != NULL) { // Need "pre-" pseudo-actions
+
+ // Create "pre-" notify pseudo-action for clone
+ n_data->pre = new_notify_pseudo_action(rsc, action, RSC_NOTIFY, "pre");
+ pe__set_action_flags(n_data->pre, pe_action_runnable);
+ add_hash_param(n_data->pre->meta, "notify_type", "pre");
+ add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
+
+ // Create "pre-" notifications complete pseudo-action for clone
+ n_data->pre_done = new_notify_pseudo_action(rsc, action, RSC_NOTIFIED,
+ "confirmed-pre");
+ pe__set_action_flags(n_data->pre_done, pe_action_runnable);
+ add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
+ add_hash_param(n_data->pre_done->meta,
+ "notify_operation", n_data->action);
+
+ // Order "pre-" -> "pre-" complete -> original action
+ order_actions(n_data->pre, n_data->pre_done, pe_order_optional);
+ order_actions(n_data->pre_done, action, pe_order_optional);
+ }
+
+ if (complete != NULL) { // Need "post-" pseudo-actions
+
+ // Create "post-" notify pseudo-action for clone
+ n_data->post = new_notify_pseudo_action(rsc, complete, RSC_NOTIFY,
+ "post");
+ n_data->post->priority = INFINITY;
+ if (pcmk_is_set(complete->flags, pe_action_runnable)) {
+ pe__set_action_flags(n_data->post, pe_action_runnable);
+ } else {
+ pe__clear_action_flags(n_data->post, pe_action_runnable);
+ }
+ add_hash_param(n_data->post->meta, "notify_type", "post");
+ add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
+
+ // Create "post-" notifications complete pseudo-action for clone
+ n_data->post_done = new_notify_pseudo_action(rsc, complete,
+ RSC_NOTIFIED,
+ "confirmed-post");
+ n_data->post_done->priority = INFINITY;
+ if (pcmk_is_set(complete->flags, pe_action_runnable)) {
+ pe__set_action_flags(n_data->post_done, pe_action_runnable);
+ } else {
+ pe__clear_action_flags(n_data->post_done, pe_action_runnable);
+ }
+ add_hash_param(n_data->post_done->meta, "notify_type", "post");
+ add_hash_param(n_data->post_done->meta,
+ "notify_operation", n_data->action);
+
+ // Order original action complete -> "post-" -> "post-" complete
+ order_actions(complete, n_data->post, pe_order_implies_then);
+ order_actions(n_data->post, n_data->post_done, pe_order_implies_then);
+ }
+
+ // If we created both, order "pre-" complete -> "post-"
+ if ((action != NULL) && (complete != NULL)) {
+ order_actions(n_data->pre_done, n_data->post, pe_order_optional);
+ }
+ return n_data;
+}
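+
+/* Illustrative lifecycle sketch (hypothetical clone "clone_rsc" with "start"
+ * and "started" actions): create the pseudo-actions and notification data,
+ * populate and attach the notifications, then free the data.
+ *
+ * notify_data_t *n_data = pe__action_notif_pseudo_ops(clone_rsc, RSC_START,
+ * start, started);
+ *
+ * if (n_data != NULL) {
+ * pe__create_action_notifications(clone_rsc, n_data);
+ * pe__free_action_notification_data(n_data);
+ * }
+ */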
+
+/*!
+ * \internal
+ * \brief Create a new notification entry
+ *
+ * \param[in] rsc Resource for notification
+ * \param[in] node Node for notification
+ *
+ * \return Newly allocated notification entry
+ * \note The caller is responsible for freeing the return value.
+ */
+static notify_entry_t *
+new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
+{
+ notify_entry_t *entry = calloc(1, sizeof(notify_entry_t));
+
+ CRM_ASSERT(entry != NULL);
+ entry->rsc = rsc;
+ entry->node = node;
+ return entry;
+}
+
+/*!
+ * \internal
+ * \brief Add notification data for resource state and optionally actions
+ *
+ * \param[in] rsc Clone or clone instance being notified
+ * \param[in] activity Whether to add notification entries for actions
+ * \param[in,out] n_data Notification data for clone
+ */
+static void
+collect_resource_data(const pe_resource_t *rsc, bool activity,
+ notify_data_t *n_data)
+{
+ const GList *iter = NULL;
+ notify_entry_t *entry = NULL;
+ const pe_node_t *node = NULL;
+
+ if (n_data == NULL) {
+ return;
+ }
+
+ if (n_data->allowed_nodes == NULL) {
+ n_data->allowed_nodes = rsc->allowed_nodes;
+ }
+
+ // If this is a clone, call recursively for each instance
+ if (rsc->children != NULL) {
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ const pe_resource_t *child = (const pe_resource_t *) iter->data;
+
+ collect_resource_data(child, activity, n_data);
+ }
+ return;
+ }
+
+ // This is a notification for a single clone instance
+
+ if (rsc->running_on != NULL) {
+ node = rsc->running_on->data; // First is sufficient
+ }
+ entry = new_notify_entry(rsc, node);
+
+ // Add notification indicating the resource state
+ switch (rsc->role) {
+ case RSC_ROLE_STOPPED:
+ n_data->inactive = g_list_prepend(n_data->inactive, entry);
+ break;
+
+ case RSC_ROLE_STARTED:
+ n_data->active = g_list_prepend(n_data->active, entry);
+ break;
+
+ case RSC_ROLE_UNPROMOTED:
+ n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
+ n_data->active = g_list_prepend(n_data->active,
+ dup_notify_entry(entry));
+ break;
+
+ case RSC_ROLE_PROMOTED:
+ n_data->promoted = g_list_prepend(n_data->promoted, entry);
+ n_data->active = g_list_prepend(n_data->active,
+ dup_notify_entry(entry));
+ break;
+
+ default:
+ crm_err("Resource %s role on %s (%s) is not supported for "
+ "notifications (bug?)",
+ rsc->id, pe__node_name(node), role2text(rsc->role));
+ free(entry);
+ break;
+ }
+
+ if (!activity) {
+ return;
+ }
+
+ // Add notification entries for each of the resource's actions
+ for (iter = rsc->actions; iter != NULL; iter = iter->next) {
+ const pe_action_t *op = (const pe_action_t *) iter->data;
+
+ if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ enum action_tasks task = text2task(op->task);
+
+ if ((task == stop_rsc) && op->node->details->unclean) {
+ // Create anyway (additional noise if node can't be fenced)
+ } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ continue;
+ }
+
+ entry = new_notify_entry(rsc, op->node);
+
+ switch (task) {
+ case start_rsc:
+ n_data->start = g_list_prepend(n_data->start, entry);
+ break;
+ case stop_rsc:
+ n_data->stop = g_list_prepend(n_data->stop, entry);
+ break;
+ case action_promote:
+ n_data->promote = g_list_prepend(n_data->promote, entry);
+ break;
+ case action_demote:
+ n_data->demote = g_list_prepend(n_data->demote, entry);
+ break;
+ default:
+ free(entry);
+ break;
+ }
+ }
+ }
+}
+
+// For (char *) value
+#define add_notify_env(n_data, key, value) do { \
+ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value); \
+ } while (0)
+
+// For (GString *) value
+#define add_notify_env_gs(n_data, key, value) do { \
+ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, \
+ (const char *) value->str); \
+ } while (0)
+
+// For (GString *) value
+#define add_notify_env_free_gs(n_data, key, value) do { \
+ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, \
+ (const char *) value->str); \
+ g_string_free(value, TRUE); value = NULL; \
+ } while (0)
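+
+/* For example (hypothetical values), add_notify_env_free_gs(n_data,
+ * "notify_stop_resource", rsc_list) with rsc_list holding "rsc:0 rsc:1"
+ * prepends the pair notify_stop_resource="rsc:0 rsc:1" to n_data->keys and
+ * frees rsc_list. Agents ultimately see such pairs as
+ * OCF_RESKEY_CRM_meta_notify_* environment variables.
+ */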
+
+/*!
+ * \internal
+ * \brief Create notification name/value pairs from structured data
+ *
+ * \param[in] rsc Resource that notification is for
+ * \param[in,out] n_data Notification data
+ */
+static void
+add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
+{
+ bool required = false; // Whether to make notify actions required
+ GString *rsc_list = NULL;
+ GString *node_list = NULL;
+ GString *metal_list = NULL;
+ const char *source = NULL;
+ GList *nodes = NULL;
+
+ n_data->stop = notify_entries_to_strings(n_data->stop,
+ &rsc_list, &node_list);
+ if ((strcmp(" ", (const char *) rsc_list->str) != 0)
+ && pcmk__str_eq(n_data->action, RSC_STOP, pcmk__str_none)) {
+ required = true;
+ }
+ add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
+
+ if ((n_data->start != NULL)
+ && pcmk__str_eq(n_data->action, RSC_START, pcmk__str_none)) {
+ required = true;
+ }
+ n_data->start = notify_entries_to_strings(n_data->start,
+ &rsc_list, &node_list);
+ add_notify_env_free_gs(n_data, "notify_start_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
+
+ if ((n_data->demote != NULL)
+ && pcmk__str_eq(n_data->action, RSC_DEMOTE, pcmk__str_none)) {
+ required = true;
+ }
+ n_data->demote = notify_entries_to_strings(n_data->demote,
+ &rsc_list, &node_list);
+ add_notify_env_free_gs(n_data, "notify_demote_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
+
+ if ((n_data->promote != NULL)
+ && pcmk__str_eq(n_data->action, RSC_PROMOTE, pcmk__str_none)) {
+ required = true;
+ }
+ n_data->promote = notify_entries_to_strings(n_data->promote,
+ &rsc_list, &node_list);
+ add_notify_env_free_gs(n_data, "notify_promote_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_promote_uname", node_list);
+
+ n_data->active = notify_entries_to_strings(n_data->active,
+ &rsc_list, &node_list);
+ add_notify_env_free_gs(n_data, "notify_active_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_active_uname", node_list);
+
+ n_data->unpromoted = notify_entries_to_strings(n_data->unpromoted,
+ &rsc_list, &node_list);
+ add_notify_env_gs(n_data, "notify_unpromoted_resource", rsc_list);
+ add_notify_env_gs(n_data, "notify_unpromoted_uname", node_list);
+
+ // Deprecated: kept for backward compatibility with older resource agents
+ add_notify_env_free_gs(n_data, "notify_slave_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_slave_uname", node_list);
+
+ n_data->promoted = notify_entries_to_strings(n_data->promoted,
+ &rsc_list, &node_list);
+ add_notify_env_gs(n_data, "notify_promoted_resource", rsc_list);
+ add_notify_env_gs(n_data, "notify_promoted_uname", node_list);
+
+ // Deprecated: kept for backward compatibility with older resource agents
+ add_notify_env_free_gs(n_data, "notify_master_resource", rsc_list);
+ add_notify_env_free_gs(n_data, "notify_master_uname", node_list);
+
+ n_data->inactive = notify_entries_to_strings(n_data->inactive,
+ &rsc_list, NULL);
+ add_notify_env_free_gs(n_data, "notify_inactive_resource", rsc_list);
+
+ nodes = g_hash_table_get_values(n_data->allowed_nodes);
+ if (!pcmk__is_daemon) {
+ /* For display purposes, sort the node list, for consistent
+ * regression test output (while avoiding the performance hit
+ * for the live cluster).
+ */
+ nodes = g_list_sort(nodes, pe__cmp_node_name);
+ }
+ get_node_names(nodes, &node_list, NULL);
+ add_notify_env_free_gs(n_data, "notify_available_uname", node_list);
+ g_list_free(nodes);
+
+ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+ if (pcmk__str_eq("host", source, pcmk__str_none)) {
+ get_node_names(rsc->cluster->nodes, &node_list, &metal_list);
+ add_notify_env_free_gs(n_data, "notify_all_hosts", metal_list);
+ } else {
+ get_node_names(rsc->cluster->nodes, &node_list, NULL);
+ }
+ add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
+
+ if (required && (n_data->pre != NULL)) {
+ pe__clear_action_flags(n_data->pre, pe_action_optional);
+ pe__clear_action_flags(n_data->pre_done, pe_action_optional);
+ }
+
+ if (required && (n_data->post != NULL)) {
+ pe__clear_action_flags(n_data->post, pe_action_optional);
+ pe__clear_action_flags(n_data->post_done, pe_action_optional);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Find any remote connection start relevant to an action
+ *
+ * \param[in] action Action to check
+ *
+ * \return If \p action is behind a remote connection, the connection's start
+ * action, otherwise NULL
+ */
+static pe_action_t *
+find_remote_start(pe_action_t *action)
+{
+ if ((action != NULL) && (action->node != NULL)) {
+ pe_resource_t *remote_rsc = action->node->details->remote_rsc;
+
+ if (remote_rsc != NULL) {
+ return find_first_action(remote_rsc->actions, NULL, RSC_START,
+ NULL);
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief Create notify actions, and add notify data to original actions
+ *
+ * \param[in,out] rsc Clone or clone instance that notification is for
+ * \param[in,out] n_data Clone notification data for some action
+ */
+static void
+create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
+{
+ GList *iter = NULL;
+ pe_action_t *stop = NULL;
+ pe_action_t *start = NULL;
+ enum action_tasks task = text2task(n_data->action);
+
+ // If this is a clone, call recursively for each instance
+ if (rsc->children != NULL) {
+ g_list_foreach(rsc->children, (GFunc) create_notify_actions, n_data);
+ return;
+ }
+
+ // Add notification meta-attributes to original actions
+ for (iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pe_action_t *op = (pe_action_t *) iter->data;
+
+ if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ switch (text2task(op->task)) {
+ case start_rsc:
+ case stop_rsc:
+ case action_promote:
+ case action_demote:
+ add_notify_data_to_action_meta(n_data, op);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Skip notify action itself if original action was not needed
+ switch (task) {
+ case start_rsc:
+ if (n_data->start == NULL) {
+ pe_rsc_trace(rsc, "No notify action needed for %s %s",
+ rsc->id, n_data->action);
+ return;
+ }
+ break;
+
+ case action_promote:
+ if (n_data->promote == NULL) {
+ pe_rsc_trace(rsc, "No notify action needed for %s %s",
+ rsc->id, n_data->action);
+ return;
+ }
+ break;
+
+ case action_demote:
+ if (n_data->demote == NULL) {
+ pe_rsc_trace(rsc, "No notify action needed for %s %s",
+ rsc->id, n_data->action);
+ return;
+ }
+ break;
+
+ default:
+ // We cannot do the same for stop, because it might be implied by fencing
+ break;
+ }
+
+ pe_rsc_trace(rsc, "Creating notify actions for %s %s",
+ rsc->id, n_data->action);
+
+ // Create notify actions for stop or demote
+ if ((rsc->role != RSC_ROLE_STOPPED)
+ && ((task == stop_rsc) || (task == action_demote))) {
+
+ stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL);
+
+ for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pe_node_t *current_node = (pe_node_t *) iter->data;
+
+ /* If a stop is a pseudo-action implied by fencing, don't try to
+ * notify the node getting fenced.
+ */
+ if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)
+ && (current_node->details->unclean
+ || current_node->details->remote_requires_reset)) {
+ continue;
+ }
+
+ new_notify_action(rsc, current_node, n_data->pre,
+ n_data->pre_done, n_data);
+
+ if ((task == action_demote) || (stop == NULL)
+ || pcmk_is_set(stop->flags, pe_action_optional)) {
+ new_post_notify_action(rsc, current_node, n_data);
+ }
+ }
+ }
+
+ // Create notify actions for start or promote
+ if ((rsc->next_role != RSC_ROLE_STOPPED)
+ && ((task == start_rsc) || (task == action_promote))) {
+
+ start = find_first_action(rsc->actions, NULL, RSC_START, NULL);
+ if (start != NULL) {
+ pe_action_t *remote_start = find_remote_start(start);
+
+ if ((remote_start != NULL)
+ && !pcmk_is_set(remote_start->flags, pe_action_runnable)) {
+ /* Start and promote actions for a clone instance behind
+ * a Pacemaker Remote connection happen after the
+ * connection starts. If the connection start is blocked, do
+ * not schedule notifications for these actions.
+ */
+ return;
+ }
+ }
+ if (rsc->allocated_to == NULL) {
+ pe_proc_err("Next role '%s' but %s is not allocated",
+ role2text(rsc->next_role), rsc->id);
+ return;
+ }
+ if ((task != start_rsc) || (start == NULL)
+ || pcmk_is_set(start->flags, pe_action_optional)) {
+
+ new_notify_action(rsc, rsc->allocated_to, n_data->pre,
+ n_data->pre_done, n_data);
+ }
+ new_post_notify_action(rsc, rsc->allocated_to, n_data);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create notification data and actions for one clone action
+ *
+ * \param[in,out] rsc Clone resource that notification is for
+ * \param[in,out] n_data Clone notification data for some action
+ */
+void
+pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data)
+{
+ if ((rsc == NULL) || (n_data == NULL)) {
+ return;
+ }
+ collect_resource_data(rsc, true, n_data);
+ add_notif_keys(rsc, n_data);
+ create_notify_actions(rsc, n_data);
+}
+
+/*!
+ * \internal
+ * \brief Free notification data for one action
+ *
+ * \param[in,out] n_data Notification data to free
+ */
+void
+pe__free_action_notification_data(notify_data_t *n_data)
+{
+ if (n_data == NULL) {
+ return;
+ }
+ g_list_free_full(n_data->stop, free);
+ g_list_free_full(n_data->start, free);
+ g_list_free_full(n_data->demote, free);
+ g_list_free_full(n_data->promote, free);
+ g_list_free_full(n_data->promoted, free);
+ g_list_free_full(n_data->unpromoted, free);
+ g_list_free_full(n_data->active, free);
+ g_list_free_full(n_data->inactive, free);
+ pcmk_free_nvpairs(n_data->keys);
+ free(n_data);
+}
+
+/*!
+ * \internal
+ * \brief Order clone "notifications complete" pseudo-action after fencing
+ *
+ * If a stop action is implied by fencing, the usual notification pseudo-actions
+ * will not be sufficient to order things properly, or even create all needed
+ * notifications if the clone is also stopping on another node, and another
+ * clone is ordered after it. This function creates new notification
+ * pseudo-actions relative to the fencing to ensure everything works properly.
+ *
+ * \param[in] stop Stop action implied by fencing
+ * \param[in,out] rsc Clone resource that notification is for
+ * \param[in,out] stonith_op Fencing action that implies \p stop
+ */
+void
+pe__order_notifs_after_fencing(const pe_action_t *stop, pe_resource_t *rsc,
+ pe_action_t *stonith_op)
+{
+ notify_data_t *n_data;
+
+ crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
+ n_data = pe__action_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op);
+
+ if (n_data != NULL) {
+ collect_resource_data(rsc, false, n_data);
+ add_notify_env(n_data, "notify_stop_resource", rsc->id);
+ add_notify_env(n_data, "notify_stop_uname", stop->node->details->uname);
+ create_notify_actions(uber_parent(rsc), n_data);
+ pe__free_action_notification_data(n_data);
+ }
+}
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
new file mode 100644
index 0000000..68cc867
--- /dev/null
+++ b/lib/pengine/pe_output.c
@@ -0,0 +1,3108 @@
+/*
+ * Copyright 2019-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <stdint.h>
+#include <crm/common/xml_internal.h>
+#include <crm/common/output.h>
+#include <crm/cib/util.h>
+#include <crm/msg_xml.h>
+#include <crm/pengine/internal.h>
+
+const char *
+pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
+{
+ const char *desc = NULL;
+ // User-supplied description
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
+ || pcmk__list_of_multiple(rsc->running_on)) {
+ desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
+ }
+ return desc;
+}
+
+/* Never display node attributes whose name starts with one of these prefixes */
+#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
+ "shutdown", "terminate", "standby", "#", NULL }
+
+static int
+compare_attribute(gconstpointer a, gconstpointer b)
+{
+ return strcmp((const char *) a, (const char *) b);
+}
+
+/*!
+ * \internal
+ * \brief Determine whether extended information about an attribute should be added.
+ *
+ * \param[in] node Node that ran this resource
+ * \param[in,out] rsc_list List of resources for this node
+ * \param[in,out] data_set Cluster working set
+ * \param[in] attrname Attribute to find
+ * \param[out] expected_score Expected value for this attribute
+ *
+ * \return true if extended information should be printed, false otherwise
+ * \note Currently, extended information is only supported for ping/pingd
+ * resources, for which a message will be printed if connectivity is lost
+ * or degraded.
+ */
+static bool
+add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
+ const char *attrname, int *expected_score)
+{
+ GList *gIter = NULL;
+
+ for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ const char *type = g_hash_table_lookup(rsc->meta, "type");
+ const char *name = NULL;
+ GHashTable *params = NULL;
+
+ if (rsc->children != NULL) {
+ if (add_extra_info(node, rsc->children, data_set, attrname,
+ expected_score)) {
+ return true;
+ }
+ }
+
+ if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
+ continue;
+ }
+
+ params = pe_rsc_params(rsc, node, data_set);
+ name = g_hash_table_lookup(params, "name");
+
+ if (name == NULL) {
+ name = "pingd";
+ }
+
+ /* Match the resource whose "name" parameter equals the attribute name */
+ if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
+ int host_list_num = 0;
+ const char *hosts = g_hash_table_lookup(params, "host_list");
+ const char *multiplier = g_hash_table_lookup(params, "multiplier");
+ int multiplier_i;
+
+ if (hosts) {
+ char **host_list = g_strsplit(hosts, " ", 0);
+ host_list_num = g_strv_length(host_list);
+ g_strfreev(host_list);
+ }
+
+ if ((multiplier == NULL)
+ || (pcmk__scan_min_int(multiplier, &multiplier_i,
+ INT_MIN) != pcmk_rc_ok)) {
+ /* The ocf:pacemaker:ping resource agent defaults multiplier to
+ * 1. The agent does not currently validate nonnumeric text, but if
+ * it did, defaulting to 1 would be a reasonable choice.
+ */
+ multiplier_i = 1;
+ }
+ *expected_score = host_list_num * multiplier_i;
+
+ return true;
+ }
+ }
+ return false;
+}
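+
+/* Worked example (hypothetical parameters): a ping resource with
+ * host_list="host1 host2 host3" and multiplier="1000" has an expected score
+ * of 3 * 1000 = 3000 for full connectivity; a lower attribute value
+ * indicates lost or degraded connectivity.
+ */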
+
+static GList *
+filter_attr_list(GList *attr_list, char *name)
+{
+ int i;
+ const char *filt_str[] = FILTER_STR;
+
+ CRM_CHECK(name != NULL, return attr_list);
+
+ /* Filter out automatically-maintained attributes */
+ for (i = 0; filt_str[i] != NULL; i++) {
+ if (g_str_has_prefix(name, filt_str[i])) {
+ return attr_list;
+ }
+ }
+
+ return g_list_insert_sorted(attr_list, name, compare_attribute);
+}
+
+static GList *
+get_operation_list(xmlNode *rsc_entry) {
+ GList *op_list = NULL;
+ xmlNode *rsc_op = NULL;
+
+ for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
+ rsc_op = pcmk__xe_next(rsc_op)) {
+ const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
+ const char *interval_ms_s = crm_element_value(rsc_op,
+ XML_LRM_ATTR_INTERVAL_MS);
+ const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
+ int op_rc_i;
+
+ pcmk__scan_min_int(op_rc, &op_rc_i, 0);
+
+ /* Display 0-interval monitors as "probe" */
+ if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
+ task = "probe";
+ }
+
+ /* Ignore notifies and probes that found the resource not running
+ * (rc 7 is the OCF "not running" code)
+ */
+ if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)
+ || (pcmk__str_eq(task, "probe", pcmk__str_casei)
+ && (op_rc_i == 7))) {
+ continue;
+ }
+
+ if (pcmk__str_eq((const char *) rsc_op->name, XML_LRM_TAG_RSC_OP,
+ pcmk__str_none)) {
+ op_list = g_list_append(op_list, rsc_op);
+ }
+ }
+
+ op_list = g_list_sort(op_list, sort_op_by_callid);
+ return op_list;
+}
+
+static void
+add_dump_node(gpointer key, gpointer value, gpointer user_data)
+{
+ xmlNodePtr node = user_data;
+ pcmk_create_xml_text_node(node, (const char *) key, (const char *) value);
+}
+
+static void
+append_dump_text(gpointer key, gpointer value, gpointer user_data)
+{
+ char **dump_text = user_data;
+ char *new_text = crm_strdup_printf("%s %s=%s",
+ *dump_text, (char *)key, (char *)value);
+
+ free(*dump_text);
+ *dump_text = new_text;
+}
+
+static const char *
+get_cluster_stack(pe_working_set_t *data_set)
+{
+ xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
+ data_set->input, LOG_DEBUG);
+ return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
+}
+
+static char *
+last_changed_string(const char *last_written, const char *user,
+ const char *client, const char *origin) {
+ if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
+ return crm_strdup_printf("%s%s%s%s%s%s%s",
+ last_written ? last_written : "",
+ user ? " by " : "",
+ user ? user : "",
+ client ? " via " : "",
+ client ? client : "",
+ origin ? " on " : "",
+ origin ? origin : "");
+ } else {
+ return strdup("");
+ }
+}
+
+static char *
+op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
+ int rc, bool print_timing) {
+ const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
+ char *interval_str = NULL;
+ char *buf = NULL;
+
+ if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
+ char *pair = pcmk__format_nvpair("interval", interval_ms_s, "ms");
+ interval_str = crm_strdup_printf(" %s", pair);
+ free(pair);
+ }
+
+ if (print_timing) {
+ char *last_change_str = NULL;
+ char *exec_str = NULL;
+ char *queue_str = NULL;
+
+ const char *value = NULL;
+
+ time_t epoch = 0;
+
+ if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
+ && (epoch > 0)) {
+ char *epoch_str = pcmk__epoch2str(&epoch, 0);
+
+ last_change_str = crm_strdup_printf(" %s=\"%s\"",
+ XML_RSC_OP_LAST_CHANGE,
+ pcmk__s(epoch_str, ""));
+ free(epoch_str);
+ }
+
+ value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
+ if (value) {
+ char *pair = pcmk__format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
+ exec_str = crm_strdup_printf(" %s", pair);
+ free(pair);
+ }
+
+ value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
+ if (value) {
+ char *pair = pcmk__format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
+ queue_str = crm_strdup_printf(" %s", pair);
+ free(pair);
+ }
+
+ buf = crm_strdup_printf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task,
+ interval_str ? interval_str : "",
+ last_change_str ? last_change_str : "",
+ exec_str ? exec_str : "",
+ queue_str ? queue_str : "",
+ rc, services_ocf_exitcode_str(rc));
+
+ if (last_change_str) {
+ free(last_change_str);
+ }
+
+ if (exec_str) {
+ free(exec_str);
+ }
+
+ if (queue_str) {
+ free(queue_str);
+ }
+ } else {
+ buf = crm_strdup_printf("(%s) %s%s%s", call, task,
+ interval_str ? ":" : "",
+ interval_str ? interval_str : "");
+ }
+
+ if (interval_str) {
+ free(interval_str);
+ }
+
+ return buf;
+}
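+
+/* Sample output (hypothetical operation values): with print_timing, this
+ * yields something like
+ * (11) monitor: interval="10000ms" last-rc-change="..." exec-time="22ms"
+ * queue-time="0ms" rc=0 (ok)
+ * and without timing, simply (11) monitor: interval="10000ms"
+ */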
+
+static char *
+resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
+ int failcount, time_t last_failure) {
+ char *buf = NULL;
+
+ if (rsc == NULL) {
+ buf = crm_strdup_printf("%s: orphan", rsc_id);
+ } else if (all || failcount || last_failure > 0) {
+ char *failcount_s = NULL;
+ char *lastfail_s = NULL;
+
+ if (failcount > 0) {
+ failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
+ failcount);
+ } else {
+ failcount_s = strdup("");
+ }
+ if (last_failure > 0) {
+ buf = pcmk__epoch2str(&last_failure, 0);
+ lastfail_s = crm_strdup_printf(" %s='%s'",
+ PCMK__LAST_FAILURE_PREFIX, buf);
+ free(buf);
+ }
+
+ buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
+ rsc_id, rsc->migration_threshold, failcount_s,
+ lastfail_s? lastfail_s : "");
+ free(failcount_s);
+ free(lastfail_s);
+ } else {
+ buf = crm_strdup_printf("%s:", rsc_id);
+ }
+
+ return buf;
+}
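+
+/* Sample output (hypothetical values): an active resource with failures
+ * yields something like
+ * myrsc: migration-threshold=3 fail-count=2 last-failure='...'
+ * while an orphaned resource yields "myrsc: orphan" and an unremarkable one
+ * yields "myrsc:".
+ */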
+
+static const char *
+get_node_feature_set(pe_node_t *node) {
+ const char *feature_set = NULL;
+
+ if (node->details->online && !pe__is_guest_or_remote_node(node)) {
+ feature_set = g_hash_table_lookup(node->details->attrs,
+ CRM_ATTR_FEATURE_SET);
+ /* The feature set attribute has been present since CRM feature set
+ * 3.15.1; if it is missing, the node must be running an earlier
+ * version. */
+ if (feature_set == NULL) {
+ feature_set = "<3.15.1";
+ }
+ }
+ return feature_set;
+}
+
+static bool
+is_mixed_version(pe_working_set_t *data_set) {
+ const char *feature_set = NULL;
+ for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = gIter->data;
+ const char *node_feature_set = get_node_feature_set(node);
+ if (node_feature_set != NULL) {
+ if (feature_set == NULL) {
+ feature_set = node_feature_set;
+ } else if (strcmp(feature_set, node_feature_set) != 0) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static char *
+formatted_xml_buf(pe_resource_t *rsc, bool raw)
+{
+ if (raw) {
+ return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
+ } else {
+ return dump_xml_formatted(rsc->xml);
+ }
+}
+
+PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+ "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
+static int
+cluster_summary(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ enum pcmk_pacemakerd_state pcmkd_state =
+ (enum pcmk_pacemakerd_state) va_arg(args, int);
+ uint32_t section_opts = va_arg(args, uint32_t);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ int rc = pcmk_rc_no_output;
+ const char *stack_s = get_cluster_stack(data_set);
+
+ if (pcmk_is_set(section_opts, pcmk_section_stack)) {
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-stack", stack_s, pcmkd_state);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_dc)) {
+ xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
+ data_set->input, LOG_DEBUG);
+ const char *dc_version_s = dc_version?
+ crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
+ : NULL;
+ const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
+ char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(data_set);
+
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ dc_version_s, dc_name, mixed_version);
+ free(dc_name);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_times)) {
+ const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-times",
+ data_set->localhost, last_written, user, client, origin);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_counts)) {
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-counts", g_list_length(data_set->nodes),
+ data_set->ninstances, data_set->disabled_resources,
+ data_set->blocked_resources);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_options)) {
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-options", data_set);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+
+ if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ rc = pcmk_rc_ok;
+ }
+ }
+
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+ "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
+static int
+cluster_summary_html(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ enum pcmk_pacemakerd_state pcmkd_state =
+ (enum pcmk_pacemakerd_state) va_arg(args, int);
+ uint32_t section_opts = va_arg(args, uint32_t);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ int rc = pcmk_rc_no_output;
+ const char *stack_s = get_cluster_stack(data_set);
+
+ if (pcmk_is_set(section_opts, pcmk_section_stack)) {
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-stack", stack_s, pcmkd_state);
+ }
+
+ /* Always print DC if none, even if not requested */
+ if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
+ xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
+ data_set->input, LOG_DEBUG);
+ const char *dc_version_s = dc_version?
+ crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
+ : NULL;
+ const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
+ char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(data_set);
+
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ dc_version_s, dc_name, mixed_version);
+ free(dc_name);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_times)) {
+ const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-times",
+ data_set->localhost, last_written, user, client, origin);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_counts)) {
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
+ out->message(out, "cluster-counts", g_list_length(data_set->nodes),
+ data_set->ninstances, data_set->disabled_resources,
+ data_set->blocked_resources);
+ }
+
+ if (pcmk_is_set(section_opts, pcmk_section_options)) {
+ /* Kind of a hack: close the list we may have opened earlier in this
+ * function so we can put all the options into their own list. We
+ * only want to do this for HTML output, though.
+ */
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+
+ out->begin_list(out, NULL, NULL, "Config Options");
+ out->message(out, "cluster-options", data_set);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+
+ if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
+ if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ rc = pcmk_rc_ok;
+ }
+ }
+
+ return rc;
+}
+
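+/*!
+ * \brief Build a display name for a node
+ *
+ * The display name is the node's uname, followed by "@<host>" if the node is
+ * a guest node and detail was requested, and by " (<id>)" if the node ID
+ * differs from the uname and detail was requested (for example,
+ * "guest1@host2 (3)").
+ *
+ * \param[in] node          Node to build display name for
+ * \param[in] print_detail  Whether to include the host and node ID
+ *
+ * \return Newly allocated string with display name (asserted non-NULL)
+ */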
+char *
+pe__node_display_name(pe_node_t *node, bool print_detail)
+{
+ char *node_name;
+ const char *node_host = NULL;
+ const char *node_id = NULL;
+ int name_len;
+
+ CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
+
+ /* Host is displayed only if this is a guest node and detail is requested */
+ if (print_detail && pe__is_guest_node(node)) {
+ const pe_resource_t *container = node->details->remote_rsc->container;
+ const pe_node_t *host_node = pe__current_node(container);
+
+ if (host_node && host_node->details) {
+ node_host = host_node->details->uname;
+ }
+ if (node_host == NULL) {
+ node_host = ""; /* so we at least get "uname@" to indicate guest */
+ }
+ }
+
+ /* Node ID is displayed if different from uname and detail is requested */
+ if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
+ node_id = node->details->id;
+ }
+
+ /* Determine name length */
+ name_len = strlen(node->details->uname) + 1;
+ if (node_host) {
+ name_len += strlen(node_host) + 1; /* "@node_host" */
+ }
+ if (node_id) {
+ name_len += strlen(node_id) + 3; /* + " (node_id)" */
+ }
+
+ /* Allocate and populate display name */
+ node_name = malloc(name_len);
+ CRM_ASSERT(node_name != NULL);
+ strcpy(node_name, node->details->uname);
+ if (node_host) {
+ strcat(node_name, "@");
+ strcat(node_name, node_host);
+ }
+ if (node_id) {
+ strcat(node_name, " (");
+ strcat(node_name, node_id);
+ strcat(node_name, ")");
+ }
+ return node_name;
+}
+
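+/*!
+ * \internal
+ * \brief Create an XML element with name/value pair attributes as output
+ *
+ * \param[in,out] out          Output object
+ * \param[in]     is_list      If true, push the new element as the current
+ *                             parent for subsequent output
+ * \param[in]     tag_name     Name of element to create
+ * \param[in]     pairs_count  Number of name/value pairs that follow
+ * \param[in]     ...          \p pairs_count pairs of attribute name and
+ *                             value (pairs with a NULL name or value are
+ *                             skipped)
+ *
+ * \return Standard Pacemaker return code (currently always pcmk_rc_ok)
+ */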
+int
+pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
+ , size_t pairs_count, ...)
+{
+ xmlNodePtr xml_node = NULL;
+ va_list args;
+
+ CRM_ASSERT(tag_name != NULL);
+
+ xml_node = pcmk__output_xml_peek_parent(out);
+ CRM_ASSERT(xml_node != NULL);
+ xml_node = is_list
+ ? create_xml_node(xml_node, tag_name)
+ : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
+
+ va_start(args, pairs_count);
+ while (pairs_count--) {
+ const char *param_name = va_arg(args, const char *);
+ const char *param_value = va_arg(args, const char *);
+ if (param_name && param_value) {
+ crm_xml_add(xml_node, param_name, param_value);
+ }
+ }
+ va_end(args);
+
+ if (is_list) {
+ pcmk__output_xml_push_parent(out, xml_node);
+ }
+ return pcmk_rc_ok;
+}
+
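+/*!
+ * \internal
+ * \brief Get a prose description of a role, for use in ban messages
+ *
+ * \param[in] role  Role to describe
+ *
+ * \return "in <promoted role name> role " (or the legacy equivalent) if
+ *         \p role is the promoted role, otherwise an empty string
+ */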
+static const char *
+role_desc(enum rsc_role_e role)
+{
+ if (role == RSC_ROLE_PROMOTED) {
+#ifdef PCMK__COMPAT_2_0
+ return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
+#else
+ return "in " RSC_ROLE_PROMOTED_S " role ";
+#endif
+ }
+ return "";
+}
+
+PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+static int
+ban_html(pcmk__output_t *out, va_list args) {
+ pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pe__location_t *location = va_arg(args, pe__location_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ char *node_name = pe__node_display_name(pe_node,
+ pcmk_is_set(show_opts, pcmk_show_node_id));
+ char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
+ location->id, location->rsc_lh->id,
+ role_desc(location->role_filter), node_name);
+
+ pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
+
+ free(node_name);
+ free(buf);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+static int
+ban_text(pcmk__output_t *out, va_list args) {
+ pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pe__location_t *location = va_arg(args, pe__location_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ char *node_name = pe__node_display_name(pe_node,
+ pcmk_is_set(show_opts, pcmk_show_node_id));
+ out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
+ location->id, location->rsc_lh->id,
+ role_desc(location->role_filter), node_name);
+
+ free(node_name);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+static int
+ban_xml(pcmk__output_t *out, va_list args) {
+ pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pe__location_t *location = va_arg(args, pe__location_t *);
+ uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
+
+ const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
+ char *weight_s = pcmk__itoa(pe_node->weight);
+
+ pcmk__output_create_xml_node(out, "ban",
+ "id", location->id,
+ "resource", location->rsc_lh->id,
+ "node", pe_node->details->uname,
+ "weight", weight_s,
+ "promoted-only", promoted_only,
+ /* This is a deprecated alias for
+ * promoted_only. Removing it will break
+ * backward compatibility of the API schema,
+ * which will require an API schema major
+ * version bump.
+ */
+ "master_only", promoted_only,
+ NULL);
+
+ free(weight_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
+ "uint32_t", "bool")
+static int
+ban_list(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ const char *prefix = va_arg(args, const char *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer = va_arg(args, int);
+
+ GList *gIter, *gIter2;
+ int rc = pcmk_rc_no_output;
+
+ /* Print each ban */
+ for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
+ pe__location_t *location = gIter->data;
+ const pe_resource_t *rsc = location->rsc_lh;
+
+ if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
+ continue;
+ }
+
+ if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
+ pcmk__str_star_matches)
+ && !pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)),
+ only_rsc, pcmk__str_star_matches)) {
+ continue;
+ }
+
+ for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
+ pe_node_t *node = (pe_node_t *) gIter2->data;
+
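+ // A negative score means the constraint bans the resource from the node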
+ if (node->weight < 0) {
+ PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
+ out->message(out, "ban", node, location, show_opts);
+ }
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
+static int
+cluster_counts_html(pcmk__output_t *out, va_list args) {
+ unsigned int nnodes = va_arg(args, unsigned int);
+ int nresources = va_arg(args, int);
+ int ndisabled = va_arg(args, int);
+ int nblocked = va_arg(args, int);
+
+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ char *nnodes_str = crm_strdup_printf("%u node%s configured",
+ nnodes, pcmk__plural_s(nnodes));
+
+ pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
+ free(nnodes_str);
+
+ if (ndisabled && nblocked) {
+ char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
+ nresources, pcmk__plural_s(nresources),
+ ndisabled);
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
+ free(s);
+
+ pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
+
+ s = crm_strdup_printf(", %d ", nblocked);
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
+ free(s);
+
+ pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
+ pcmk_create_html_node(resources_node, "span", NULL, NULL,
+ " from further action due to failure)");
+ } else if (ndisabled && !nblocked) {
+ char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
+ nresources, pcmk__plural_s(nresources),
+ ndisabled);
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
+ free(s);
+
+ pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
+ } else if (!ndisabled && nblocked) {
+ char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
+ nresources, pcmk__plural_s(nresources),
+ nblocked);
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
+ free(s);
+
+ pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
+ pcmk_create_html_node(resources_node, "span", NULL, NULL,
+ " from further action due to failure)");
+ } else {
+ char *s = crm_strdup_printf("%d resource instance%s configured",
+ nresources, pcmk__plural_s(nresources));
+ pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
+ free(s);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
+static int
+cluster_counts_text(pcmk__output_t *out, va_list args) {
+ unsigned int nnodes = va_arg(args, unsigned int);
+ int nresources = va_arg(args, int);
+ int ndisabled = va_arg(args, int);
+ int nblocked = va_arg(args, int);
+
+ out->list_item(out, NULL, "%d node%s configured",
+ nnodes, pcmk__plural_s(nnodes));
+
+ if (ndisabled && nblocked) {
+ out->list_item(out, NULL, "%d resource instance%s configured "
+ "(%d DISABLED, %d BLOCKED from "
+ "further action due to failure)",
+ nresources, pcmk__plural_s(nresources), ndisabled,
+ nblocked);
+ } else if (ndisabled && !nblocked) {
+ out->list_item(out, NULL, "%d resource instance%s configured "
+ "(%d DISABLED)",
+ nresources, pcmk__plural_s(nresources), ndisabled);
+ } else if (!ndisabled && nblocked) {
+ out->list_item(out, NULL, "%d resource instance%s configured "
+ "(%d BLOCKED from further action "
+ "due to failure)",
+ nresources, pcmk__plural_s(nresources), nblocked);
+ } else {
+ out->list_item(out, NULL, "%d resource instance%s configured",
+ nresources, pcmk__plural_s(nresources));
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
+static int
+cluster_counts_xml(pcmk__output_t *out, va_list args) {
+ unsigned int nnodes = va_arg(args, unsigned int);
+ int nresources = va_arg(args, int);
+ int ndisabled = va_arg(args, int);
+ int nblocked = va_arg(args, int);
+
+ xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
+ xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
+
+ char *s = pcmk__itoa(nnodes);
+ crm_xml_add(nodes_node, "number", s);
+ free(s);
+
+ s = pcmk__itoa(nresources);
+ crm_xml_add(resources_node, "number", s);
+ free(s);
+
+ s = pcmk__itoa(ndisabled);
+ crm_xml_add(resources_node, "disabled", s);
+ free(s);
+
+ s = pcmk__itoa(nblocked);
+ crm_xml_add(resources_node, "blocked", s);
+ free(s);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+ "char *", "int")
+static int
+cluster_dc_html(pcmk__output_t *out, va_list args) {
+ pe_node_t *dc = va_arg(args, pe_node_t *);
+ const char *quorum = va_arg(args, const char *);
+ const char *dc_version_s = va_arg(args, const char *);
+ char *dc_name = va_arg(args, char *);
+ bool mixed_version = va_arg(args, int);
+
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
+
+ if (dc) {
+ char *buf = crm_strdup_printf("%s (version %s) -", dc_name,
+ dc_version_s ? dc_version_s : "unknown");
+ pcmk_create_html_node(node, "span", NULL, NULL, buf);
+ free(buf);
+
+ if (mixed_version) {
+ pcmk_create_html_node(node, "span", NULL, "warning",
+ " MIXED-VERSION");
+ }
+ pcmk_create_html_node(node, "span", NULL, NULL, " partition");
+ if (crm_is_true(quorum)) {
+ pcmk_create_html_node(node, "span", NULL, NULL, " with");
+ } else {
+ pcmk_create_html_node(node, "span", NULL, "warning", " WITHOUT");
+ }
+ pcmk_create_html_node(node, "span", NULL, NULL, " quorum");
+ } else {
+ pcmk_create_html_node(node, "span", NULL, "warning", "NONE");
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+ "char *", "int")
+static int
+cluster_dc_text(pcmk__output_t *out, va_list args) {
+ pe_node_t *dc = va_arg(args, pe_node_t *);
+ const char *quorum = va_arg(args, const char *);
+ const char *dc_version_s = va_arg(args, const char *);
+ char *dc_name = va_arg(args, char *);
+ bool mixed_version = va_arg(args, int);
+
+ if (dc) {
+ out->list_item(out, "Current DC",
+ "%s (version %s) - %spartition %s quorum",
+ dc_name, dc_version_s ? dc_version_s : "unknown",
+ mixed_version ? "MIXED-VERSION " : "",
+ crm_is_true(quorum) ? "with" : "WITHOUT");
+ } else {
+ out->list_item(out, "Current DC", "NONE");
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+ "char *", "int")
+static int
+cluster_dc_xml(pcmk__output_t *out, va_list args) {
+ pe_node_t *dc = va_arg(args, pe_node_t *);
+ const char *quorum = va_arg(args, const char *);
+ const char *dc_version_s = va_arg(args, const char *);
+ char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
+ bool mixed_version = va_arg(args, int);
+
+ if (dc) {
+ pcmk__output_create_xml_node(out, "current_dc",
+ "present", "true",
+ "version", dc_version_s ? dc_version_s : "",
+ "name", dc->details->uname,
+ "id", dc->details->id,
+ "with_quorum", pcmk__btoa(crm_is_true(quorum)),
+ "mixed_version", pcmk__btoa(mixed_version),
+ NULL);
+ } else {
+ pcmk__output_create_xml_node(out, "current_dc",
+ "present", "false",
+ NULL);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
+static int
+cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
+ unsigned long long flags = va_arg(args, unsigned long long);
+
+ if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
+ pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
+ return pcmk_rc_ok;
+ } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
+ pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n");
+ return pcmk_rc_ok;
+ } else {
+ return pcmk_rc_no_output;
+ }
+}
+
+PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+static int
+cluster_options_html(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+
+ out->list_item(out, NULL, "STONITH of failed nodes %s",
+ pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+
+ out->list_item(out, NULL, "Cluster is %s",
+ pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+
+ switch (data_set->no_quorum_policy) {
+ case no_quorum_freeze:
+ out->list_item(out, NULL, "No quorum policy: Freeze resources");
+ break;
+
+ case no_quorum_stop:
+ out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
+ break;
+
+ case no_quorum_demote:
+ out->list_item(out, NULL, "No quorum policy: Demote promotable "
+ "resources and stop all other resources");
+ break;
+
+ case no_quorum_ignore:
+ out->list_item(out, NULL, "No quorum policy: Ignore");
+ break;
+
+ case no_quorum_suicide:
+ out->list_item(out, NULL, "No quorum policy: Suicide");
+ break;
+ }
+
+ if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
+ pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
+ pcmk_create_html_node(node, "span", NULL, NULL,
+ " (the cluster will not attempt to start, stop, or recover services)");
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
+ pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
+ pcmk_create_html_node(node, "span", NULL, NULL,
+ " (the cluster will keep all resources stopped)");
+ } else {
+ out->list_item(out, NULL, "Resource management: enabled");
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+static int
+cluster_options_log(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+
+ if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
+ } else {
+ return pcmk_rc_no_output;
+ }
+}
+
+PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+static int
+cluster_options_text(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+
+ out->list_item(out, NULL, "STONITH of failed nodes %s",
+ pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+
+ out->list_item(out, NULL, "Cluster is %s",
+ pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+
+ switch (data_set->no_quorum_policy) {
+ case no_quorum_freeze:
+ out->list_item(out, NULL, "No quorum policy: Freeze resources");
+ break;
+
+ case no_quorum_stop:
+ out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
+ break;
+
+ case no_quorum_demote:
+ out->list_item(out, NULL, "No quorum policy: Demote promotable "
+ "resources and stop all other resources");
+ break;
+
+ case no_quorum_ignore:
+ out->list_item(out, NULL, "No quorum policy: Ignore");
+ break;
+
+ case no_quorum_suicide:
+ out->list_item(out, NULL, "No quorum policy: Suicide");
+ break;
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+static int
+cluster_options_xml(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+
+ const char *no_quorum_policy = NULL;
+ char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
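+ // priority_fencing_delay is stored in seconds but reported in milliseconds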
+ char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
+
+ switch (data_set->no_quorum_policy) {
+ case no_quorum_freeze:
+ no_quorum_policy = "freeze";
+ break;
+
+ case no_quorum_stop:
+ no_quorum_policy = "stop";
+ break;
+
+ case no_quorum_demote:
+ no_quorum_policy = "demote";
+ break;
+
+ case no_quorum_ignore:
+ no_quorum_policy = "ignore";
+ break;
+
+ case no_quorum_suicide:
+ no_quorum_policy = "suicide";
+ break;
+ }
+
+ pcmk__output_create_xml_node(out, "cluster_options",
+ "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
+ "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
+ "no-quorum-policy", no_quorum_policy,
+ "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
+ "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
+ "stonith-timeout-ms", stonith_timeout_str,
+ "priority-fencing-delay-ms", priority_fencing_delay_str,
+ NULL);
+ free(stonith_timeout_str);
+ free(priority_fencing_delay_str);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
+static int
+cluster_stack_html(pcmk__output_t *out, va_list args) {
+ const char *stack_s = va_arg(args, const char *);
+ enum pcmk_pacemakerd_state pcmkd_state =
+ (enum pcmk_pacemakerd_state) va_arg(args, int);
+
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
+ pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
+
+ if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
+ pcmk_create_html_node(node, "span", NULL, NULL, " (");
+ pcmk_create_html_node(node, "span", NULL, NULL,
+ pcmk__pcmkd_state_enum2friendly(pcmkd_state));
+ pcmk_create_html_node(node, "span", NULL, NULL, ")");
+ }
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
+static int
+cluster_stack_text(pcmk__output_t *out, va_list args) {
+ const char *stack_s = va_arg(args, const char *);
+ enum pcmk_pacemakerd_state pcmkd_state =
+ (enum pcmk_pacemakerd_state) va_arg(args, int);
+
+ if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
+ out->list_item(out, "Stack", "%s (%s)",
+ stack_s, pcmk__pcmkd_state_enum2friendly(pcmkd_state));
+ } else {
+ out->list_item(out, "Stack", "%s", stack_s);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
+static int
+cluster_stack_xml(pcmk__output_t *out, va_list args) {
+ const char *stack_s = va_arg(args, const char *);
+ enum pcmk_pacemakerd_state pcmkd_state =
+ (enum pcmk_pacemakerd_state) va_arg(args, int);
+
+ const char *state_s = NULL;
+
+ if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
+ state_s = pcmk_pacemakerd_api_daemon_state_enum2text(pcmkd_state);
+ }
+
+ pcmk__output_create_xml_node(out, "stack",
+ "type", stack_s,
+ "pacemakerd-state", state_s,
+ NULL);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
+ "const char *", "const char *", "const char *")
+static int
+cluster_times_html(pcmk__output_t *out, va_list args) {
+ const char *our_nodename = va_arg(args, const char *);
+ const char *last_written = va_arg(args, const char *);
+ const char *user = va_arg(args, const char *);
+ const char *client = va_arg(args, const char *);
+ const char *origin = va_arg(args, const char *);
+
+ xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
+ xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ char *time_s = pcmk__epoch2str(NULL, 0);
+
+ pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
+ pcmk_create_html_node(updated_node, "span", NULL, NULL, time_s);
+
+ if (our_nodename != NULL) {
+ pcmk_create_html_node(updated_node, "span", NULL, NULL, " on ");
+ pcmk_create_html_node(updated_node, "span", NULL, NULL, our_nodename);
+ }
+
+ free(time_s);
+ time_s = last_changed_string(last_written, user, client, origin);
+
+ pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
+ pcmk_create_html_node(changed_node, "span", NULL, NULL, time_s);
+
+ free(time_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
+ "const char *", "const char *", "const char *")
+static int
+cluster_times_xml(pcmk__output_t *out, va_list args) {
+ const char *our_nodename = va_arg(args, const char *);
+ const char *last_written = va_arg(args, const char *);
+ const char *user = va_arg(args, const char *);
+ const char *client = va_arg(args, const char *);
+ const char *origin = va_arg(args, const char *);
+
+ char *time_s = pcmk__epoch2str(NULL, 0);
+
+ pcmk__output_create_xml_node(out, "last_update",
+ "time", time_s,
+ "origin", our_nodename,
+ NULL);
+
+ pcmk__output_create_xml_node(out, "last_change",
+ "time", last_written ? last_written : "",
+ "user", user ? user : "",
+ "client", client ? client : "",
+ "origin", origin ? origin : "",
+ NULL);
+
+ free(time_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
+ "const char *", "const char *", "const char *")
+static int
+cluster_times_text(pcmk__output_t *out, va_list args) {
+ const char *our_nodename = va_arg(args, const char *);
+ const char *last_written = va_arg(args, const char *);
+ const char *user = va_arg(args, const char *);
+ const char *client = va_arg(args, const char *);
+ const char *origin = va_arg(args, const char *);
+
+ char *time_s = pcmk__epoch2str(NULL, 0);
+
+ out->list_item(out, "Last updated", "%s%s%s",
+ time_s, (our_nodename != NULL)? " on " : "",
+ pcmk__s(our_nodename, ""));
+
+ free(time_s);
+ time_s = last_changed_string(last_written, user, client, origin);
+
+ out->list_item(out, "Last change", " %s", time_s);
+
+ free(time_s);
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Display a failed action in less-technical natural language
+ *
+ * \param[in,out] out Output object to use for display
+ * \param[in] xml_op XML containing failed action
+ * \param[in] op_key Operation key of failed action
+ * \param[in] node_name Where failed action occurred
+ * \param[in] rc OCF exit code of failed action
+ * \param[in] status Execution status of failed action
+ * \param[in] exit_reason Exit reason given for failed action
+ * \param[in] exec_time String containing execution time in milliseconds
+ */
+static void
+failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
+ const char *op_key, const char *node_name, int rc,
+ int status, const char *exit_reason,
+ const char *exec_time)
+{
+ char *rsc_id = NULL;
+ char *task = NULL;
+ guint interval_ms = 0;
+ time_t last_change_epoch = 0;
+ GString *str = NULL;
+
+ if (pcmk__str_empty(op_key)
+ || !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) {
+ rsc_id = strdup("unknown resource");
+ task = strdup("unknown action");
+ interval_ms = 0;
+ }
+ CRM_ASSERT((rsc_id != NULL) && (task != NULL));
+
+ str = g_string_sized_new(256); // Should be sufficient for most messages
+
+ pcmk__g_strcat(str, rsc_id, " ", NULL);
+
+ if (interval_ms != 0) {
+ pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
+ NULL);
+ }
+ pcmk__g_strcat(str, crm_action_str(task, interval_ms), " on ", node_name,
+ NULL);
+
+ if (status == PCMK_EXEC_DONE) {
+ pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
+ NULL);
+ if (!pcmk__str_empty(exit_reason)) {
+ pcmk__g_strcat(str, " (", exit_reason, ")", NULL);
+ }
+
+ } else {
+ pcmk__g_strcat(str, " could not be executed (",
+ pcmk_exec_status_str(status), NULL);
+ if (!pcmk__str_empty(exit_reason)) {
+ pcmk__g_strcat(str, ": ", exit_reason, NULL);
+ }
+ g_string_append_c(str, ')');
+ }
+
+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &last_change_epoch) == pcmk_ok) {
+ char *s = pcmk__epoch2str(&last_change_epoch, 0);
+
+ pcmk__g_strcat(str, " at ", s, NULL);
+ free(s);
+ }
+ if (!pcmk__str_empty(exec_time)) {
+ int exec_time_ms = 0;
+
+ if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok)
+ && (exec_time_ms > 0)) {
+
+ pcmk__g_strcat(str, " after ",
+ pcmk__readable_interval(exec_time_ms), NULL);
+ }
+ }
+
+ out->list_item(out, NULL, "%s", str->str);
+ g_string_free(str, TRUE);
+ free(rsc_id);
+ free(task);
+}
+
+/*!
+ * \internal
+ * \brief Display a failed action with technical details
+ *
+ * \param[in,out] out Output object to use for display
+ * \param[in] xml_op XML containing failed action
+ * \param[in] op_key Operation key of failed action
+ * \param[in] node_name Where failed action occurred
+ * \param[in] rc OCF exit code of failed action
+ * \param[in] status Execution status of failed action
+ * \param[in] exit_reason Exit reason given for failed action
+ * \param[in] exec_time String containing execution time in milliseconds
+ */
+static void
+failed_action_technical(pcmk__output_t *out, const xmlNode *xml_op,
+ const char *op_key, const char *node_name, int rc,
+ int status, const char *exit_reason,
+ const char *exec_time)
+{
+ const char *call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
+ const char *queue_time = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
+ const char *exit_status = services_ocf_exitcode_str(rc);
+ const char *lrm_status = pcmk_exec_status_str(status);
+ time_t last_change_epoch = 0;
+ GString *str = NULL;
+
+ if (pcmk__str_empty(op_key)) {
+ op_key = "unknown operation";
+ }
+ if (pcmk__str_empty(exit_status)) {
+ exit_status = "unknown exit status";
+ }
+ if (pcmk__str_empty(call_id)) {
+ call_id = "unknown";
+ }
+
+ str = g_string_sized_new(256);
+
+ g_string_append_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'",
+ op_key, node_name, exit_status, rc, call_id,
+ lrm_status);
+
+ if (!pcmk__str_empty(exit_reason)) {
+ pcmk__g_strcat(str, ", exitreason='", exit_reason, "'", NULL);
+ }
+
+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &last_change_epoch) == pcmk_ok) {
+ char *last_change_str = pcmk__epoch2str(&last_change_epoch, 0);
+
+ pcmk__g_strcat(str,
+ ", " XML_RSC_OP_LAST_CHANGE "="
+ "'", last_change_str, "'", NULL);
+ free(last_change_str);
+ }
+ if (!pcmk__str_empty(queue_time)) {
+ pcmk__g_strcat(str, ", queued=", queue_time, "ms", NULL);
+ }
+ if (!pcmk__str_empty(exec_time)) {
+ pcmk__g_strcat(str, ", exec=", exec_time, "ms", NULL);
+ }
+
+ out->list_item(out, NULL, "%s", str->str);
+ g_string_free(str, TRUE);
+}
+
+PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
+static int
+failed_action_default(pcmk__output_t *out, va_list args)
+{
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ const char *op_key = pe__xe_history_key(xml_op);
+ const char *node_name = crm_element_value(xml_op, XML_ATTR_UNAME);
+ const char *exit_reason = crm_element_value(xml_op,
+ XML_LRM_ATTR_EXIT_REASON);
+ const char *exec_time = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
+
+ int rc;
+ int status;
+
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
+
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
+ &status, 0);
+
+ if (pcmk__str_empty(node_name)) {
+ node_name = "unknown node";
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_failed_detail)) {
+ failed_action_technical(out, xml_op, op_key, node_name, rc, status,
+ exit_reason, exec_time);
+ } else {
+ failed_action_friendly(out, xml_op, op_key, node_name, rc, status,
+ exit_reason, exec_time);
+ }
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
+static int
+failed_action_xml(pcmk__output_t *out, va_list args) {
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+ uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
+
+ const char *op_key = pe__xe_history_key(xml_op);
+ const char *op_key_name = "op_key";
+ int rc;
+ int status;
+ const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
+
+ time_t epoch = 0;
+ char *rc_s = NULL;
+ char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
+ xmlNodePtr node = NULL;
+
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
+ &status, 0);
+
+ rc_s = pcmk__itoa(rc);
+ if (crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY) == NULL) {
+ op_key_name = "id";
+ }
+ node = pcmk__output_create_xml_node(out, "failure",
+ op_key_name, op_key,
+ "node", crm_element_value(xml_op, XML_ATTR_UNAME),
+ "exitstatus", services_ocf_exitcode_str(rc),
+ "exitreason", pcmk__s(reason_s, ""),
+ "exitcode", rc_s,
+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ "status", pcmk_exec_status_str(status),
+ NULL);
+ free(rc_s);
+
+ if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &epoch) == pcmk_ok) && (epoch > 0)) {
+ guint interval_ms = 0;
+ char *interval_ms_s = NULL;
+ char *rc_change = pcmk__epoch2str(&epoch,
+ crm_time_log_date
+ |crm_time_log_timeofday
+ |crm_time_log_with_timezone);
+
+ crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
+ interval_ms_s = crm_strdup_printf("%u", interval_ms);
+
+ pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change,
+ "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
+ "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
+ "interval", interval_ms_s,
+ "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK),
+ NULL);
+
+ free(interval_ms_s);
+ free(rc_change);
+ }
+
+ free(reason_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
+ "GList *", "uint32_t", "bool")
+static int
+failed_action_list(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer = va_arg(args, int);
+
+ xmlNode *xml_op = NULL;
+ int rc = pcmk_rc_no_output;
+
+ if (xmlChildElementCount(data_set->failed) == 0) {
+ return rc;
+ }
+
+ for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
+ xml_op = pcmk__xml_next(xml_op)) {
+ char *rsc = NULL;
+
+ if (!pcmk__str_in_list(crm_element_value(xml_op, XML_ATTR_UNAME), only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ if (pcmk_xe_mask_probe_failure(xml_op)) {
+ continue;
+ }
+
+ if (!parse_op_key(pe__xe_history_key(xml_op), &rsc, NULL, NULL)) {
+ continue;
+ }
+
+ if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) {
+ free(rsc);
+ continue;
+ }
+
+ free(rsc);
+
+ PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
+ out->message(out, "failed-action", xml_op, show_opts);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
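+/*!
+ * \internal
+ * \brief Add HTML spans describing a node's status to a parent element
+ *
+ * This covers cluster membership (online/offline), standby and maintenance
+ * modes, node health, and (if requested) the node's feature set.
+ *
+ * \param[in]     node       Node whose status to describe
+ * \param[in,out] parent     HTML element to add the spans to
+ * \param[in]     show_opts  Group of \c pcmk_show_* flags
+ */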
+static void
+status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
+{
+ int health = pe__node_health(node);
+
+ // Cluster membership
+ if (node->details->online) {
+ pcmk_create_html_node(parent, "span", NULL, "online", " online");
+ } else {
+ pcmk_create_html_node(parent, "span", NULL, "offline", " OFFLINE");
+ }
+
+ // Standby mode
+ if (node->details->standby_onfail && (node->details->running_rsc != NULL)) {
+ pcmk_create_html_node(parent, "span", NULL, "standby",
+ " (in standby due to on-fail,"
+ " with active resources)");
+ } else if (node->details->standby_onfail) {
+ pcmk_create_html_node(parent, "span", NULL, "standby",
+ " (in standby due to on-fail)");
+ } else if (node->details->standby && (node->details->running_rsc != NULL)) {
+ pcmk_create_html_node(parent, "span", NULL, "standby",
+ " (in standby, with active resources)");
+ } else if (node->details->standby) {
+ pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby)");
+ }
+
+ // Maintenance mode
+ if (node->details->maintenance) {
+ pcmk_create_html_node(parent, "span", NULL, "maint",
+ " (in maintenance mode)");
+ }
+
+ // Node health
+ if (health < 0) {
+ pcmk_create_html_node(parent, "span", NULL, "health_red",
+ " (health is RED)");
+ } else if (health == 0) {
+ pcmk_create_html_node(parent, "span", NULL, "health_yellow",
+ " (health is YELLOW)");
+ }
+
+ // Feature set
+ if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
+ const char *feature_set = get_node_feature_set(node);
+ if (feature_set != NULL) {
+ char *buf = crm_strdup_printf(", feature set %s", feature_set);
+ pcmk_create_html_node(parent, "span", NULL, NULL, buf);
+ free(buf);
+ }
+ }
+}
+
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
+ "GList *", "GList *")
+static int
+node_html(pcmk__output_t *out, va_list args) {
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool full = va_arg(args, int);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
+
+ if (full) {
+ xmlNodePtr item_node;
+
+ if (pcmk_all_flags_set(show_opts, pcmk_show_brief | pcmk_show_rscs_by_node)) {
+ GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+
+ out->begin_list(out, NULL, NULL, "%s:", node_name);
+ item_node = pcmk__output_xml_create_parent(out, "li", NULL);
+ pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
+ status_node(node, item_node, show_opts);
+
+ if (rscs != NULL) {
+ uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
+ out->begin_list(out, NULL, NULL, "Resources");
+ pe__rscs_brief_output(out, rscs, new_show_opts);
+ out->end_list(out);
+ }
+
+ pcmk__output_xml_pop_parent(out);
+ out->end_list(out);
+
+ } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *lpc2 = NULL;
+ int rc = pcmk_rc_no_output;
+
+ out->begin_list(out, NULL, NULL, "%s:", node_name);
+ item_node = pcmk__output_xml_create_parent(out, "li", NULL);
+ pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
+ status_node(node, item_node, show_opts);
+
+ for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
+ pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
+
+ show_opts |= pcmk_show_rsc_only;
+ out->message(out, crm_map_element_name(rsc->xml), show_opts,
+ rsc, only_node, only_rsc);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ pcmk__output_xml_pop_parent(out);
+ out->end_list(out);
+
+ } else {
+ char *buf = crm_strdup_printf("%s:", node_name);
+
+ item_node = pcmk__output_create_xml_node(out, "li", NULL);
+ pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
+ status_node(node, item_node, show_opts);
+
+ free(buf);
+ }
+ } else {
+ out->begin_list(out, NULL, NULL, "%s:", node_name);
+ }
+
+ free(node_name);
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Get a human-friendly textual description of a node's status
+ *
+ * \param[in] node Node to check
+ *
+ * \return String representation of node's status
+ */
+static const char *
+node_text_status(const pe_node_t *node)
+{
+ if (node->details->unclean) {
+ if (node->details->online) {
+ return "UNCLEAN (online)";
+
+ } else if (node->details->pending) {
+ return "UNCLEAN (pending)";
+
+ } else {
+ return "UNCLEAN (offline)";
+ }
+
+ } else if (node->details->pending) {
+ return "pending";
+
+ } else if (node->details->standby_onfail && node->details->online) {
+ return "standby (on-fail)";
+
+ } else if (node->details->standby) {
+ if (node->details->online) {
+ if (node->details->running_rsc) {
+ return "standby (with active resources)";
+ } else {
+ return "standby";
+ }
+ } else {
+ return "OFFLINE (standby)";
+ }
+
+ } else if (node->details->maintenance) {
+ if (node->details->online) {
+ return "maintenance";
+ } else {
+ return "OFFLINE (maintenance)";
+ }
+
+ } else if (node->details->online) {
+ return "online";
+ }
+
+ return "OFFLINE";
+}
+
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+static int
+node_text(pcmk__output_t *out, va_list args) {
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool full = va_arg(args, int);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ if (full) {
+ char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
+ GString *str = g_string_sized_new(64);
+ int health = pe__node_health(node);
+
+ // Create a summary line with node type, name, and status
+ if (pe__is_guest_node(node)) {
+ g_string_append(str, "GuestNode");
+ } else if (pe__is_remote_node(node)) {
+ g_string_append(str, "RemoteNode");
+ } else {
+ g_string_append(str, "Node");
+ }
+ pcmk__g_strcat(str, " ", node_name, ": ", node_text_status(node), NULL);
+
+ if (health < 0) {
+ g_string_append(str, " (health is RED)");
+ } else if (health == 0) {
+ g_string_append(str, " (health is YELLOW)");
+ }
+ if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
+ const char *feature_set = get_node_feature_set(node);
+ if (feature_set != NULL) {
+ pcmk__g_strcat(str, ", feature set ", feature_set, NULL);
+ }
+ }
+
+ /* If we're grouping by node, print its resources */
+ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ if (pcmk_is_set(show_opts, pcmk_show_brief)) {
+ GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+
+ if (rscs != NULL) {
+ uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
+ out->begin_list(out, NULL, NULL, "%s", str->str);
+ out->begin_list(out, NULL, NULL, "Resources");
+
+ pe__rscs_brief_output(out, rscs, new_show_opts);
+
+ out->end_list(out);
+ out->end_list(out);
+
+ g_list_free(rscs);
+ }
+
+ } else {
+ GList *gIter2 = NULL;
+
+ out->begin_list(out, NULL, NULL, "%s", str->str);
+ out->begin_list(out, NULL, NULL, "Resources");
+
+ for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
+
+ show_opts |= pcmk_show_rsc_only;
+ out->message(out, crm_map_element_name(rsc->xml), show_opts,
+ rsc, only_node, only_rsc);
+ }
+
+ out->end_list(out);
+ out->end_list(out);
+ }
+ } else {
+ out->list_item(out, NULL, "%s", str->str);
+ }
+
+ g_string_free(str, TRUE);
+ free(node_name);
+ } else {
+ char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
+ out->begin_list(out, NULL, NULL, "Node: %s", node_name);
+ free(node_name);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+static int
+node_xml(pcmk__output_t *out, va_list args) {
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool full = va_arg(args, int);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ if (full) {
+ const char *node_type = "unknown";
+ char *length_s = pcmk__itoa(g_list_length(node->details->running_rsc));
+ int health = pe__node_health(node);
+ const char *health_s = NULL;
+ const char *feature_set;
+
+ switch (node->details->type) {
+ case node_member:
+ node_type = "member";
+ break;
+ case node_remote:
+ node_type = "remote";
+ break;
+ case node_ping:
+ node_type = "ping";
+ break;
+ }
+
+ if (health < 0) {
+ health_s = "red";
+ } else if (health == 0) {
+ health_s = "yellow";
+ } else {
+ health_s = "green";
+ }
+
+ feature_set = get_node_feature_set(node);
+
+ pe__name_and_nvpairs_xml(out, true, "node", 15,
+ "name", node->details->uname,
+ "id", node->details->id,
+ "online", pcmk__btoa(node->details->online),
+ "standby", pcmk__btoa(node->details->standby),
+ "standby_onfail", pcmk__btoa(node->details->standby_onfail),
+ "maintenance", pcmk__btoa(node->details->maintenance),
+ "pending", pcmk__btoa(node->details->pending),
+ "unclean", pcmk__btoa(node->details->unclean),
+ "health", health_s,
+ "feature_set", feature_set,
+ "shutdown", pcmk__btoa(node->details->shutdown),
+ "expected_up", pcmk__btoa(node->details->expected_up),
+ "is_dc", pcmk__btoa(node->details->is_dc),
+ "resources_running", length_s,
+ "type", node_type);
+
+ if (pe__is_guest_node(node)) {
+ xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
+ crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id);
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *lpc = NULL;
+
+ for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
+ pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+
+ show_opts |= pcmk_show_rsc_only;
+ out->message(out, crm_map_element_name(rsc->xml), show_opts,
+ rsc, only_node, only_rsc);
+ }
+ }
+
+ free(length_s);
+
+ out->end_list(out);
+ } else {
+ pcmk__output_xml_create_parent(out, "node",
+ "name", node->details->uname,
+ NULL);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
+static int
+node_attribute_text(pcmk__output_t *out, va_list args) {
+ const char *name = va_arg(args, const char *);
+ const char *value = va_arg(args, const char *);
+ bool add_extra = va_arg(args, int);
+ int expected_score = va_arg(args, int);
+
+ if (add_extra) {
+ int v;
+
+ if (value == NULL) {
+ v = 0;
+ } else {
+ pcmk__scan_min_int(value, &v, INT_MIN);
+ }
+ if (v <= 0) {
+ out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
+ } else if (v < expected_score) {
+ out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
+ } else {
+ out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
+ }
+ } else {
+ out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
+static int
+node_attribute_html(pcmk__output_t *out, va_list args) {
+ const char *name = va_arg(args, const char *);
+ const char *value = va_arg(args, const char *);
+ bool add_extra = va_arg(args, int);
+ int expected_score = va_arg(args, int);
+
+ if (add_extra) {
+ int v;
+ char *s = crm_strdup_printf("%s: %s", name, value);
+ xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
+
+ if (value == NULL) {
+ v = 0;
+ } else {
+ pcmk__scan_min_int(value, &v, INT_MIN);
+ }
+
+ pcmk_create_html_node(item_node, "span", NULL, NULL, s);
+ free(s);
+
+ if (v <= 0) {
+ pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
+ } else if (v < expected_score) {
+ char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
+ pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
+ free(buf);
+ }
+ } else {
+ out->list_item(out, NULL, "%s: %s", name, value);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+static int
+node_and_op(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+
+ pe_resource_t *rsc = NULL;
+ gchar *node_str = NULL;
+ char *last_change_str = NULL;
+
+ const char *op_rsc = crm_element_value(xml_op, "resource");
+ int status;
+ time_t last_change = 0;
+
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
+ &status, PCMK_EXEC_UNKNOWN);
+
+ rsc = pe_find_resource(data_set->resources, op_rsc);
+
+ if (rsc) {
+ const pe_node_t *node = pe__current_node(rsc);
+ const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
+
+ if (node == NULL) {
+ node = rsc->pending_node;
+ }
+
+ node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
+ show_opts, target_role, false);
+ } else {
+ node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
+ }
+
+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &last_change) == pcmk_ok) {
+ last_change_str = crm_strdup_printf(", %s='%s', exec=%sms",
+ XML_RSC_OP_LAST_CHANGE,
+ pcmk__trim(ctime(&last_change)),
+ crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
+ }
+
+ out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
+ node_str, pe__xe_history_key(xml_op),
+ crm_element_value(xml_op, XML_ATTR_UNAME),
+ crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ crm_element_value(xml_op, XML_LRM_ATTR_RC),
+ last_change_str ? last_change_str : "",
+ pcmk_exec_status_str(status));
+
+ g_free(node_str);
+ free(last_change_str);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+static int
+node_and_op_xml(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+
+ pe_resource_t *rsc = NULL;
+ const char *op_rsc = crm_element_value(xml_op, "resource");
+ int status;
+ time_t last_change = 0;
+ xmlNode *node = NULL;
+
+ pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
+ &status, PCMK_EXEC_UNKNOWN);
+ node = pcmk__output_create_xml_node(out, "operation",
+ "op", pe__xe_history_key(xml_op),
+ "node", crm_element_value(xml_op, XML_ATTR_UNAME),
+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
+ "status", pcmk_exec_status_str(status),
+ NULL);
+
+ rsc = pe_find_resource(data_set->resources, op_rsc);
+
+ if (rsc) {
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
+ char *agent_tuple = NULL;
+
+ agent_tuple = crm_strdup_printf("%s:%s:%s", class,
+ pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "",
+ kind);
+
+ pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc),
+ "agent", agent_tuple,
+ NULL);
+ free(agent_tuple);
+ }
+
+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &last_change) == pcmk_ok) {
+ pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE,
+ pcmk__trim(ctime(&last_change)),
+ XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
+ NULL);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
+static int
+node_attribute_xml(pcmk__output_t *out, va_list args) {
+ const char *name = va_arg(args, const char *);
+ const char *value = va_arg(args, const char *);
+ bool add_extra = va_arg(args, int);
+ int expected_score = va_arg(args, int);
+
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute",
+ "name", name,
+ "value", value,
+ NULL);
+
+ if (add_extra) {
+ char *buf = pcmk__itoa(expected_score);
+ crm_xml_add(node, "expected", buf);
+ free(buf);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
+ "bool", "GList *", "GList *")
+static int
+node_attribute_list(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer = va_arg(args, int);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+
+ int rc = pcmk_rc_no_output;
+
+ /* Display each node's attributes */
+ for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = gIter->data;
+
+ GList *attr_list = NULL;
+ GHashTableIter iter;
+ gpointer key;
+
+ if (!node || !node->details || !node->details->online) {
+ continue;
+ }
+
+ g_hash_table_iter_init(&iter, node->details->attrs);
+ while (g_hash_table_iter_next (&iter, &key, NULL)) {
+ attr_list = filter_attr_list(attr_list, key);
+ }
+
+ if (attr_list == NULL) {
+ continue;
+ }
+
+ if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) {
+ g_list_free(attr_list);
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
+
+ out->message(out, "node", node, show_opts, false, only_node, only_rsc);
+
+ for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) {
+ const char *name = aIter->data;
+ const char *value = NULL;
+ int expected_score = 0;
+ bool add_extra = false;
+
+ value = pe_node_attribute_raw(node, name);
+
+ add_extra = add_extra_info(node, node->details->running_rsc,
+ data_set, name, &expected_score);
+
+ /* Print attribute name and value */
+ out->message(out, "node-attribute", name, value, add_extra,
+ expected_score);
+ }
+
+ g_list_free(attr_list);
+ out->end_list(out);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+static int
+node_capacity(pcmk__output_t *out, va_list args)
+{
+ const pe_node_t *node = va_arg(args, pe_node_t *);
+ const char *comment = va_arg(args, const char *);
+
+ char *dump_text = crm_strdup_printf("%s: %s capacity:",
+ comment, pe__node_name(node));
+
+ g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
+ out->list_item(out, NULL, "%s", dump_text);
+ free(dump_text);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+static int
+node_capacity_xml(pcmk__output_t *out, va_list args)
+{
+ const pe_node_t *node = va_arg(args, pe_node_t *);
+ const char *comment = va_arg(args, const char *);
+
+ xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
+ "node", node->details->uname,
+ "comment", comment,
+ NULL);
+ g_hash_table_foreach(node->details->utilization, add_dump_node, xml_node);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
+ "GList *", "GList *", "uint32_t", "uint32_t")
+static int
+node_history_list(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ xmlNode *node_state = va_arg(args, xmlNode *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t section_opts = va_arg(args, uint32_t);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ xmlNode *lrm_rsc = NULL;
+ xmlNode *rsc_entry = NULL;
+ int rc = pcmk_rc_no_output;
+
+ lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
+ lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
+
+ /* Print history of each of the node's resources */
+ for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
+ rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
+ const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
+ pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
+ const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+
+ /* We can't use is_filtered here to filter group resources. For is_filtered,
+ * we have to decide whether to check the parent or not. If we check the
+ * parent, all elements of a group will always be printed because that's how
+ * is_filtered works for groups. If we do not check the parent, sometimes
+ * this will filter everything out.
+ *
+ * For other resource types, is_filtered is okay.
+ */
+ if (parent->variant == pe_group) {
+ if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
+ pcmk__str_star_matches)
+ && !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
+ pcmk__str_star_matches)) {
+ continue;
+ }
+ } else {
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ continue;
+ }
+ }
+
+ if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
+ time_t last_failure = 0;
+ int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
+ NULL);
+
+ if (failcount <= 0) {
+ continue;
+ }
+
+ if (rc == pcmk_rc_no_output) {
+ rc = pcmk_rc_ok;
+ out->message(out, "node", node, show_opts, false, only_node,
+ only_rsc);
+ }
+
+ out->message(out, "resource-history", rsc, rsc_id, false,
+ failcount, last_failure, false);
+ } else {
+            GList *op_list = get_operation_list(rsc_entry);
+
+ if (op_list == NULL) {
+ continue;
+ }
+
+ if (rc == pcmk_rc_no_output) {
+ rc = pcmk_rc_ok;
+ out->message(out, "node", node, show_opts, false, only_node,
+ only_rsc);
+ }
+
+ out->message(out, "resource-operation-list", data_set, rsc, node,
+ op_list, show_opts);
+ }
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
+static int
+node_list_html(pcmk__output_t *out, va_list args) {
+ GList *nodes = va_arg(args, GList *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
+
+ int rc = pcmk_rc_no_output;
+
+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ if (!pcmk__str_in_list(node->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List");
+
+ out->message(out, "node", node, show_opts, true, only_node, only_rsc);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
+static int
+node_list_text(pcmk__output_t *out, va_list args) {
+ GList *nodes = va_arg(args, GList *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer = va_arg(args, int);
+
+ /* space-separated lists of node names */
+ GString *online_nodes = NULL;
+ GString *online_remote_nodes = NULL;
+ GString *online_guest_nodes = NULL;
+ GString *offline_nodes = NULL;
+ GString *offline_remote_nodes = NULL;
+
+ int rc = pcmk_rc_no_output;
+
+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+ char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
+
+ if (!pcmk__str_in_list(node->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ free(node_name);
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List");
+
+ // Determine whether to display node individually or in a list
+ if (node->details->unclean || node->details->pending
+ || (node->details->standby_onfail && node->details->online)
+ || node->details->standby || node->details->maintenance
+ || pcmk_is_set(show_opts, pcmk_show_rscs_by_node)
+ || pcmk_is_set(show_opts, pcmk_show_feature_set)
+ || (pe__node_health(node) <= 0)) {
+ // Display node individually
+
+ } else if (node->details->online) {
+ // Display online node in a list
+ if (pe__is_guest_node(node)) {
+ pcmk__add_word(&online_guest_nodes, 1024, node_name);
+
+ } else if (pe__is_remote_node(node)) {
+ pcmk__add_word(&online_remote_nodes, 1024, node_name);
+
+ } else {
+ pcmk__add_word(&online_nodes, 1024, node_name);
+ }
+ free(node_name);
+ continue;
+
+ } else {
+ // Display offline node in a list
+ if (pe__is_remote_node(node)) {
+ pcmk__add_word(&offline_remote_nodes, 1024, node_name);
+
+ } else if (pe__is_guest_node(node)) {
+ /* ignore offline guest nodes */
+
+ } else {
+ pcmk__add_word(&offline_nodes, 1024, node_name);
+ }
+ free(node_name);
+ continue;
+ }
+
+ /* If we get here, node is in bad state, or we're grouping by node */
+ out->message(out, "node", node, show_opts, true, only_node, only_rsc);
+ free(node_name);
+ }
+
+ /* If we're not grouping by node, summarize nodes by status */
+ if (online_nodes != NULL) {
+ out->list_item(out, "Online", "[ %s ]",
+ (const char *) online_nodes->str);
+ g_string_free(online_nodes, TRUE);
+ }
+ if (offline_nodes != NULL) {
+ out->list_item(out, "OFFLINE", "[ %s ]",
+ (const char *) offline_nodes->str);
+ g_string_free(offline_nodes, TRUE);
+ }
+ if (online_remote_nodes) {
+ out->list_item(out, "RemoteOnline", "[ %s ]",
+ (const char *) online_remote_nodes->str);
+ g_string_free(online_remote_nodes, TRUE);
+ }
+ if (offline_remote_nodes) {
+ out->list_item(out, "RemoteOFFLINE", "[ %s ]",
+ (const char *) offline_remote_nodes->str);
+ g_string_free(offline_remote_nodes, TRUE);
+ }
+ if (online_guest_nodes != NULL) {
+ out->list_item(out, "GuestOnline", "[ %s ]",
+ (const char *) online_guest_nodes->str);
+ g_string_free(online_guest_nodes, TRUE);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
+static int
+node_list_xml(pcmk__output_t *out, va_list args) {
+ GList *nodes = va_arg(args, GList *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
+
+ out->begin_list(out, NULL, NULL, "nodes");
+ for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ if (!pcmk__str_in_list(node->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ out->message(out, "node", node, show_opts, true, only_node, only_rsc);
+ }
+ out->end_list(out);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
+ "uint32_t", "uint32_t", "bool")
+static int
+node_summary(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ uint32_t section_opts = va_arg(args, uint32_t);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_spacer = va_arg(args, int);
+
+ xmlNode *node_state = NULL;
+ xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
+ XML_CIB_TAG_STATUS);
+ int rc = pcmk_rc_no_output;
+
+ if (xmlChildElementCount(cib_status) == 0) {
+ return rc;
+ }
+
+ for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
+ node_state != NULL; node_state = crm_next_same_xml(node_state)) {
+ pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
+
+ if (!node || !node->details || !node->details->online) {
+ continue;
+ }
+
+ if (!pcmk__str_in_list(node->details->uname, only_node,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
+ pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
+
+ out->message(out, "node-history-list", data_set, node, node_state,
+ only_node, only_rsc, section_opts, show_opts);
+ }
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+ "const char *", "const char *")
+static int
+node_weight(pcmk__output_t *out, va_list args)
+{
+ const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const char *prefix = va_arg(args, const char *);
+ const char *uname = va_arg(args, const char *);
+ const char *score = va_arg(args, const char *);
+
+ if (rsc) {
+ out->list_item(out, NULL, "%s: %s allocation score on %s: %s",
+ prefix, rsc->id, uname, score);
+ } else {
+ out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+ "const char *", "const char *")
+static int
+node_weight_xml(pcmk__output_t *out, va_list args)
+{
+ const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const char *prefix = va_arg(args, const char *);
+ const char *uname = va_arg(args, const char *);
+ const char *score = va_arg(args, const char *);
+
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "node_weight",
+ "function", prefix,
+ "node", uname,
+ "score", score,
+ NULL);
+
+ if (rsc) {
+ crm_xml_add(node, "id", rsc->id);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
+static int
+op_history_text(pcmk__output_t *out, va_list args) {
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+ const char *task = va_arg(args, const char *);
+ const char *interval_ms_s = va_arg(args, const char *);
+ int rc = va_arg(args, int);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ char *buf = op_history_string(xml_op, task, interval_ms_s, rc,
+ pcmk_is_set(show_opts, pcmk_show_timing));
+
+ out->list_item(out, NULL, "%s", buf);
+
+ free(buf);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
+static int
+op_history_xml(pcmk__output_t *out, va_list args) {
+ xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
+ const char *task = va_arg(args, const char *);
+ const char *interval_ms_s = va_arg(args, const char *);
+ int rc = va_arg(args, int);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ char *rc_s = pcmk__itoa(rc);
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history",
+ "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ "task", task,
+ "rc", rc_s,
+ "rc_text", services_ocf_exitcode_str(rc),
+ NULL);
+ free(rc_s);
+
+ if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
+ char *s = crm_strdup_printf("%sms", interval_ms_s);
+ crm_xml_add(node, "interval", s);
+ free(s);
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_timing)) {
+ const char *value = NULL;
+ time_t epoch = 0;
+
+ if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &epoch) == pcmk_ok) && (epoch > 0)) {
+ char *s = pcmk__epoch2str(&epoch, 0);
+ crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, s);
+ free(s);
+ }
+
+ value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
+ if (value) {
+ char *s = crm_strdup_printf("%sms", value);
+ crm_xml_add(node, XML_RSC_OP_T_EXEC, s);
+ free(s);
+ }
+ value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
+ if (value) {
+ char *s = crm_strdup_printf("%sms", value);
+ crm_xml_add(node, XML_RSC_OP_T_QUEUE, s);
+ free(s);
+ }
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+static int
+promotion_score(pcmk__output_t *out, va_list args)
+{
+ pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
+ pe_node_t *chosen = va_arg(args, pe_node_t *);
+ const char *score = va_arg(args, const char *);
+
+ out->list_item(out, NULL, "%s promotion score on %s: %s",
+ child_rsc->id,
+ chosen? chosen->details->uname : "none",
+ score);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+static int
+promotion_score_xml(pcmk__output_t *out, va_list args)
+{
+ pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
+ pe_node_t *chosen = va_arg(args, pe_node_t *);
+ const char *score = va_arg(args, const char *);
+
+ xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
+ "id", child_rsc->id,
+ "score", score,
+ NULL);
+
+ if (chosen) {
+ crm_xml_add(node, "node", chosen->details->uname);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+static int
+resource_config(pcmk__output_t *out, va_list args) {
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ bool raw = va_arg(args, int);
+
+ char *rsc_xml = formatted_xml_buf(rsc, raw);
+
+ out->output_xml(out, "xml", rsc_xml);
+
+ free(rsc_xml);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+static int
+resource_config_text(pcmk__output_t *out, va_list args) {
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ bool raw = va_arg(args, int);
+
+ char *rsc_xml = formatted_xml_buf(rsc, raw);
+
+ pcmk__formatted_printf(out, "Resource XML:\n");
+ out->output_xml(out, "xml", rsc_xml);
+
+ free(rsc_xml);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+static int
+resource_history_text(pcmk__output_t *out, va_list args) {
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const char *rsc_id = va_arg(args, const char *);
+ bool all = va_arg(args, int);
+ int failcount = va_arg(args, int);
+ time_t last_failure = va_arg(args, time_t);
+ bool as_header = va_arg(args, int);
+
+ char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
+
+ if (as_header) {
+ out->begin_list(out, NULL, NULL, "%s", buf);
+ } else {
+ out->list_item(out, NULL, "%s", buf);
+ }
+
+ free(buf);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+static int
+resource_history_xml(pcmk__output_t *out, va_list args) {
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const char *rsc_id = va_arg(args, const char *);
+ bool all = va_arg(args, int);
+ int failcount = va_arg(args, int);
+ time_t last_failure = va_arg(args, time_t);
+ bool as_header = va_arg(args, int);
+
+ xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history",
+ "id", rsc_id,
+ NULL);
+
+ if (rsc == NULL) {
+ pcmk__xe_set_bool_attr(node, "orphan", true);
+ } else if (all || failcount || last_failure > 0) {
+ char *migration_s = pcmk__itoa(rsc->migration_threshold);
+
+ pcmk__xe_set_props(node, "orphan", "false",
+ "migration-threshold", migration_s,
+ NULL);
+ free(migration_s);
+
+ if (failcount > 0) {
+ char *s = pcmk__itoa(failcount);
+
+ crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s);
+ free(s);
+ }
+
+ if (last_failure > 0) {
+ char *s = pcmk__epoch2str(&last_failure, 0);
+
+ crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, s);
+ free(s);
+ }
+ }
+
+ if (!as_header) {
+ pcmk__output_xml_pop_parent(out);
+ }
+
+ return pcmk_rc_ok;
+}
+
+static void
+print_resource_header(pcmk__output_t *out, uint32_t show_opts)
+{
+ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ /* Active resources have already been printed by node */
+ out->begin_list(out, NULL, NULL, "Inactive Resources");
+ } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ out->begin_list(out, NULL, NULL, "Full List of Resources");
+ } else {
+ out->begin_list(out, NULL, NULL, "Active Resources");
+ }
+}
+
+PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
+ "GList *", "GList *", "bool")
+static int
+resource_list(pcmk__output_t *out, va_list args)
+{
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+ bool print_summary = va_arg(args, int);
+ GList *only_node = va_arg(args, GList *);
+ GList *only_rsc = va_arg(args, GList *);
+ bool print_spacer = va_arg(args, int);
+
+ GList *rsc_iter;
+ int rc = pcmk_rc_no_output;
+ bool printed_header = false;
+
+ /* If we already showed active resources by node, and
+ * we're not showing inactive resources, we have nothing to do
+ */
+ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node) &&
+ !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ return rc;
+ }
+
+ /* If we haven't already printed resources grouped by node,
+ * and brief output was requested, print resource summary */
+ if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+
+ PCMK__OUTPUT_SPACER_IF(out, print_spacer);
+ print_resource_header(out, show_opts);
+ printed_header = true;
+
+ rc = pe__rscs_brief_output(out, rscs, show_opts);
+ g_list_free(rscs);
+ }
+
+ /* For each resource, display it if appropriate */
+ for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
+ int x;
+
+ /* Complex resources may have some sub-resources active and some inactive */
+ gboolean is_active = rsc->fns->active(rsc, TRUE);
+ gboolean partially_active = rsc->fns->active(rsc, FALSE);
+
+ /* Skip inactive orphans (deleted but still in CIB) */
+ if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
+ continue;
+
+ /* Skip active resources if we already displayed them by node */
+ } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ if (is_active) {
+ continue;
+ }
+
+ /* Skip primitives already counted in a brief summary */
+ } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
+ continue;
+
+ /* Skip resources that aren't at least partially active,
+ * unless we're displaying inactive resources
+ */
+ } else if (!partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ continue;
+
+ } else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) {
+ continue;
+ }
+
+ if (!printed_header) {
+ PCMK__OUTPUT_SPACER_IF(out, print_spacer);
+ print_resource_header(out, show_opts);
+ printed_header = true;
+ }
+
+ /* Print this resource */
+ x = out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc,
+ only_node, only_rsc);
+ if (x == pcmk_rc_ok) {
+ rc = pcmk_rc_ok;
+ }
+ }
+
+ if (print_summary && rc != pcmk_rc_ok) {
+ if (!printed_header) {
+ PCMK__OUTPUT_SPACER_IF(out, print_spacer);
+ print_resource_header(out, show_opts);
+ printed_header = true;
+ }
+
+ if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ out->list_item(out, NULL, "No inactive resources");
+ } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
+ out->list_item(out, NULL, "No resources");
+ } else {
+ out->list_item(out, NULL, "No active resources");
+ }
+ }
+
+ if (printed_header) {
+ out->end_list(out);
+ }
+
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
+ "pe_node_t *", "GList *", "uint32_t")
+static int
+resource_operation_list(pcmk__output_t *out, va_list args)
+{
+ pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ GList *op_list = va_arg(args, GList *);
+ uint32_t show_opts = va_arg(args, uint32_t);
+
+ GList *gIter = NULL;
+ int rc = pcmk_rc_no_output;
+
+ /* Print each operation */
+ for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
+ xmlNode *xml_op = (xmlNode *) gIter->data;
+ const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
+ const char *interval_ms_s = crm_element_value(xml_op,
+ XML_LRM_ATTR_INTERVAL_MS);
+ const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
+ int op_rc_i;
+
+ pcmk__scan_min_int(op_rc, &op_rc_i, 0);
+
+ /* Display 0-interval monitors as "probe" */
+ if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
+ task = "probe";
+ }
+
+ /* If this is the first printed operation, print heading for resource */
+ if (rc == pcmk_rc_no_output) {
+ time_t last_failure = 0;
+ int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
+ NULL);
+
+ out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
+ failcount, last_failure, true);
+ rc = pcmk_rc_ok;
+ }
+
+ /* Print the operation */
+ out->message(out, "op-history", xml_op, task, interval_ms_s,
+ op_rc_i, show_opts);
+ }
+
+ /* Free the list we created (no need to free the individual items) */
+ g_list_free(op_list);
+
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
+}
+
+PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+static int
+resource_util(pcmk__output_t *out, va_list args)
+{
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ const char *fn = va_arg(args, const char *);
+
+ char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
+ fn, rsc->id, pe__node_name(node));
+
+ g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
+ out->list_item(out, NULL, "%s", dump_text);
+ free(dump_text);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+static int
+resource_util_xml(pcmk__output_t *out, va_list args)
+{
+ pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pe_node_t *node = va_arg(args, pe_node_t *);
+ const char *fn = va_arg(args, const char *);
+
+ xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
+ "resource", rsc->id,
+ "node", node->details->uname,
+ "function", fn,
+ NULL);
+ g_hash_table_foreach(rsc->utilization, add_dump_node, xml_node);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+static int
+ticket_html(pcmk__output_t *out, va_list args) {
+ pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+
+ if (ticket->last_granted > -1) {
+ char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
+
+ out->list_item(out, NULL, "%s:\t%s%s %s=\"%s\"", ticket->id,
+ ticket->granted ? "granted" : "revoked",
+ ticket->standby ? " [standby]" : "",
+ "last-granted", pcmk__s(epoch_str, ""));
+ free(epoch_str);
+ } else {
+ out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
+ ticket->granted ? "granted" : "revoked",
+ ticket->standby ? " [standby]" : "");
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+static int
+ticket_text(pcmk__output_t *out, va_list args) {
+ pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+
+ if (ticket->last_granted > -1) {
+ char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
+
+ out->list_item(out, ticket->id, "%s%s %s=\"%s\"",
+ ticket->granted ? "granted" : "revoked",
+ ticket->standby ? " [standby]" : "",
+ "last-granted", pcmk__s(epoch_str, ""));
+ free(epoch_str);
+ } else {
+ out->list_item(out, ticket->id, "%s%s",
+ ticket->granted ? "granted" : "revoked",
+ ticket->standby ? " [standby]" : "");
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+static int
+ticket_xml(pcmk__output_t *out, va_list args) {
+ pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+
+ xmlNodePtr node = NULL;
+
+ node = pcmk__output_create_xml_node(out, "ticket",
+ "id", ticket->id,
+ "status", ticket->granted ? "granted" : "revoked",
+ "standby", pcmk__btoa(ticket->standby),
+ NULL);
+
+ if (ticket->last_granted > -1) {
+ char *buf = pcmk__epoch2str(&ticket->last_granted, 0);
+
+ crm_xml_add(node, "last-granted", buf);
+ free(buf);
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
+static int
+ticket_list(pcmk__output_t *out, va_list args) {
+ pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ bool print_spacer = va_arg(args, int);
+
+ GHashTableIter iter;
+ gpointer key, value;
+
+ if (g_hash_table_size(data_set->tickets) == 0) {
+ return pcmk_rc_no_output;
+ }
+
+ PCMK__OUTPUT_SPACER_IF(out, print_spacer);
+
+ /* Print section heading */
+ out->begin_list(out, NULL, NULL, "Tickets");
+
+ /* Print each ticket */
+ g_hash_table_iter_init(&iter, data_set->tickets);
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ pe_ticket_t *ticket = (pe_ticket_t *) value;
+ out->message(out, "ticket", ticket);
+ }
+
+ /* Close section */
+ out->end_list(out);
+ return pcmk_rc_ok;
+}
+
+static pcmk__message_entry_t fmt_functions[] = {
+ { "ban", "default", ban_text },
+ { "ban", "html", ban_html },
+ { "ban", "xml", ban_xml },
+ { "ban-list", "default", ban_list },
+ { "bundle", "default", pe__bundle_text },
+ { "bundle", "xml", pe__bundle_xml },
+ { "bundle", "html", pe__bundle_html },
+ { "clone", "default", pe__clone_default },
+ { "clone", "xml", pe__clone_xml },
+ { "cluster-counts", "default", cluster_counts_text },
+ { "cluster-counts", "html", cluster_counts_html },
+ { "cluster-counts", "xml", cluster_counts_xml },
+ { "cluster-dc", "default", cluster_dc_text },
+ { "cluster-dc", "html", cluster_dc_html },
+ { "cluster-dc", "xml", cluster_dc_xml },
+ { "cluster-options", "default", cluster_options_text },
+ { "cluster-options", "html", cluster_options_html },
+ { "cluster-options", "log", cluster_options_log },
+ { "cluster-options", "xml", cluster_options_xml },
+ { "cluster-summary", "default", cluster_summary },
+ { "cluster-summary", "html", cluster_summary_html },
+ { "cluster-stack", "default", cluster_stack_text },
+ { "cluster-stack", "html", cluster_stack_html },
+ { "cluster-stack", "xml", cluster_stack_xml },
+ { "cluster-times", "default", cluster_times_text },
+ { "cluster-times", "html", cluster_times_html },
+ { "cluster-times", "xml", cluster_times_xml },
+ { "failed-action", "default", failed_action_default },
+ { "failed-action", "xml", failed_action_xml },
+ { "failed-action-list", "default", failed_action_list },
+ { "group", "default", pe__group_default},
+ { "group", "xml", pe__group_xml },
+ { "maint-mode", "text", cluster_maint_mode_text },
+ { "node", "default", node_text },
+ { "node", "html", node_html },
+ { "node", "xml", node_xml },
+ { "node-and-op", "default", node_and_op },
+ { "node-and-op", "xml", node_and_op_xml },
+ { "node-capacity", "default", node_capacity },
+ { "node-capacity", "xml", node_capacity_xml },
+ { "node-history-list", "default", node_history_list },
+ { "node-list", "default", node_list_text },
+ { "node-list", "html", node_list_html },
+ { "node-list", "xml", node_list_xml },
+ { "node-weight", "default", node_weight },
+ { "node-weight", "xml", node_weight_xml },
+ { "node-attribute", "default", node_attribute_text },
+ { "node-attribute", "html", node_attribute_html },
+ { "node-attribute", "xml", node_attribute_xml },
+ { "node-attribute-list", "default", node_attribute_list },
+ { "node-summary", "default", node_summary },
+ { "op-history", "default", op_history_text },
+ { "op-history", "xml", op_history_xml },
+ { "primitive", "default", pe__resource_text },
+ { "primitive", "xml", pe__resource_xml },
+ { "primitive", "html", pe__resource_html },
+ { "promotion-score", "default", promotion_score },
+ { "promotion-score", "xml", promotion_score_xml },
+ { "resource-config", "default", resource_config },
+ { "resource-config", "text", resource_config_text },
+ { "resource-history", "default", resource_history_text },
+ { "resource-history", "xml", resource_history_xml },
+ { "resource-list", "default", resource_list },
+ { "resource-operation-list", "default", resource_operation_list },
+ { "resource-util", "default", resource_util },
+ { "resource-util", "xml", resource_util_xml },
+ { "ticket", "default", ticket_text },
+ { "ticket", "html", ticket_html },
+ { "ticket", "xml", ticket_xml },
+ { "ticket-list", "default", ticket_list },
+
+ { NULL, NULL, NULL }
+};
+
+void
+pe__register_messages(pcmk__output_t *out) {
+ pcmk__register_messages(out, fmt_functions);
+}
diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h
new file mode 100644
index 0000000..ae8d131
--- /dev/null
+++ b/lib/pengine/pe_status_private.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2018-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PE_STATUS_PRIVATE__H
+# define PE_STATUS_PRIVATE__H
+
+/* This header is for the sole use of libpe_status, so that functions can be
+ * declared with G_GNUC_INTERNAL for efficiency.
+ */
+
+#if defined(PCMK__UNIT_TESTING)
+#undef G_GNUC_INTERNAL
+#define G_GNUC_INTERNAL
+#endif
+
+/*!
+ * \internal
+ * \deprecated This macro will be removed in a future release
+ */
+# define status_print(fmt, args...) \
+ if(options & pe_print_html) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_printf || options & pe_print_ncurses) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_xml) { \
+ FILE *stream = print_data; \
+ fprintf(stream, fmt, ##args); \
+ } else if(options & pe_print_log) { \
+ int log_level = *(int*)print_data; \
+ do_crm_log(log_level, fmt, ##args); \
+ }
+
+typedef struct notify_data_s {
+ GSList *keys; // Environment variable name/value pairs
+
+ const char *action;
+
+ pe_action_t *pre;
+ pe_action_t *post;
+ pe_action_t *pre_done;
+ pe_action_t *post_done;
+
+ GList *active; /* notify_entry_t* */
+ GList *inactive; /* notify_entry_t* */
+ GList *start; /* notify_entry_t* */
+ GList *stop; /* notify_entry_t* */
+ GList *demote; /* notify_entry_t* */
+ GList *promote; /* notify_entry_t* */
+ GList *promoted; /* notify_entry_t* */
+ GList *unpromoted; /* notify_entry_t* */
+ GHashTable *allowed_nodes;
+} notify_data_t;
+
+G_GNUC_INTERNAL
+pe_resource_t *pe__create_clone_child(pe_resource_t *rsc,
+ pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+void pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data);
+
+G_GNUC_INTERNAL
+void pe__free_action_notification_data(notify_data_t *n_data);
+
+G_GNUC_INTERNAL
+notify_data_t *pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
+ pe_action_t *action,
+ pe_action_t *complete);
+
+G_GNUC_INTERNAL
+void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
+ pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b);
+
+G_GNUC_INTERNAL
+gboolean pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
+ pe_resource_t *parent, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_remote_nodes(xmlNode *xml_resources, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_resources(const xmlNode *xml_resources,
+ pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_nodes(xmlNode *xml_nodes, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_tags(xmlNode *xml_tags, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+gboolean unpack_status(xmlNode *status, pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+op_digest_cache_t *pe__compare_fencing_digest(pe_resource_t *rsc,
+ const char *agent,
+ pe_node_t *node,
+ pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+void pe__unpack_node_health_scores(pe_working_set_t *data_set);
+
+G_GNUC_INTERNAL
+pe_node_t *pe__bundle_active_node(const pe_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
+
+#endif // PE_STATUS_PRIVATE__H
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
new file mode 100644
index 0000000..769635f
--- /dev/null
+++ b/lib/pengine/remote.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2013-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/pengine/internal.h>
+#include <glib.h>
+
+bool
+pe__resource_is_remote_conn(const pe_resource_t *rsc,
+ const pe_working_set_t *data_set)
+{
+ return (rsc != NULL) && rsc->is_remote_node
+ && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
+}
+
+bool
+pe__is_remote_node(const pe_node_t *node)
+{
+ return (node != NULL) && (node->details->type == node_remote)
+ && ((node->details->remote_rsc == NULL)
+ || (node->details->remote_rsc->container == NULL));
+}
+
+bool
+pe__is_guest_node(const pe_node_t *node)
+{
+ return (node != NULL) && (node->details->type == node_remote)
+ && (node->details->remote_rsc != NULL)
+ && (node->details->remote_rsc->container != NULL);
+}
+
+bool
+pe__is_guest_or_remote_node(const pe_node_t *node)
+{
+ return (node != NULL) && (node->details->type == node_remote);
+}
+
+bool
+pe__is_bundle_node(const pe_node_t *node)
+{
+ return pe__is_guest_node(node)
+ && pe_rsc_is_bundled(node->details->remote_rsc);
+}
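+
+/* To summarize the predicates above: a Pacemaker Remote node always has
+ * type node_remote. It counts as a "remote" node when its connection
+ * resource is not contained (or not yet known), as a "guest" node when the
+ * connection is contained in another resource (such as a VM), and as a
+ * "bundle" node when that guest connection belongs to a bundle.
+ */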
+
+/*!
+ * \internal
+ * \brief Check whether a resource creates a guest node
+ *
+ * If a given resource contains a filler resource that is a remote connection,
+ * return that filler resource (or NULL if none is found).
+ *
+ * \param[in] data_set Working set of cluster
+ * \param[in] rsc Resource to check
+ *
+ * \return Filler resource with remote connection, or NULL if none found
+ */
+pe_resource_t *
+pe__resource_contains_guest_node(const pe_working_set_t *data_set,
+ const pe_resource_t *rsc)
+{
+ if ((rsc != NULL) && (data_set != NULL)
+ && pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+
+ for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *filler = gIter->data;
+
+ if (filler->is_remote_node) {
+ return filler;
+ }
+ }
+ }
+ return NULL;
+}
+
+bool
+xml_contains_remote_node(xmlNode *xml)
+{
+ const char *value = NULL;
+
+ if (xml == NULL) {
+ return false;
+ }
+
+ value = crm_element_value(xml, XML_ATTR_TYPE);
+ if (!pcmk__str_eq(value, "remote", pcmk__str_casei)) {
+ return false;
+ }
+
+ value = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
+ if (!pcmk__str_eq(value, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
+ return false;
+ }
+
+ value = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
+ if (!pcmk__str_eq(value, "pacemaker", pcmk__str_casei)) {
+ return false;
+ }
+
+ return true;
+}
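+
+/* For illustration, the check above matches resource XML such as
+ *
+ *   <primitive id="rnode1" class="ocf" provider="pacemaker" type="remote"/>
+ *
+ * i.e. only the ocf:pacemaker:remote agent is treated as a remote
+ * connection resource.
+ */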
+
+/*!
+ * \internal
+ * \brief Execute a supplied function for each guest node running on a host
+ *
+ * \param[in] data_set Working set for cluster
+ * \param[in] host Host node to check
+ * \param[in] helper Function to call for each guest node
+ * \param[in,out] user_data Pointer to pass to helper function
+ */
+void
+pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
+ void (*helper)(const pe_node_t*, void*), void *user_data)
+{
+ GList *iter;
+
+ CRM_CHECK(data_set && host && host->details && helper, return);
+ if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ return;
+ }
+ for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (rsc->is_remote_node && (rsc->container != NULL)) {
+ pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
+
+ if (guest_node) {
+ (*helper)(guest_node, user_data);
+ }
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create CIB XML for an implicit remote connection
+ *
+ * \param[in,out] parent If not NULL, use as parent XML element
+ * \param[in] uname Name of Pacemaker Remote node
+ * \param[in]     container_id   If not NULL, use this as connection container
+ * \param[in] migrateable If not NULL, use as allow-migrate value
+ * \param[in] is_managed If not NULL, use as is-managed value
+ * \param[in] start_timeout If not NULL, use as remote connect timeout
+ * \param[in] server If not NULL, use as remote server value
+ * \param[in] port If not NULL, use as remote port value
+ *
+ * \return Newly created XML
+ */
+xmlNode *
+pe_create_remote_xml(xmlNode *parent, const char *uname,
+ const char *container_id, const char *migrateable,
+ const char *is_managed, const char *start_timeout,
+ const char *server, const char *port)
+{
+ xmlNode *remote;
+ xmlNode *xml_sub;
+
+ remote = create_xml_node(parent, XML_CIB_TAG_RESOURCE);
+
+ // Add identity
+ crm_xml_add(remote, XML_ATTR_ID, uname);
+ crm_xml_add(remote, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
+ crm_xml_add(remote, XML_AGENT_ATTR_PROVIDER, "pacemaker");
+ crm_xml_add(remote, XML_ATTR_TYPE, "remote");
+
+ // Add meta-attributes
+ xml_sub = create_xml_node(remote, XML_TAG_META_SETS);
+ crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_META_SETS);
+ crm_create_nvpair_xml(xml_sub, NULL,
+ XML_RSC_ATTR_INTERNAL_RSC, XML_BOOLEAN_TRUE);
+ if (container_id) {
+ crm_create_nvpair_xml(xml_sub, NULL,
+ XML_RSC_ATTR_CONTAINER, container_id);
+ }
+ if (migrateable) {
+ crm_create_nvpair_xml(xml_sub, NULL,
+ XML_OP_ATTR_ALLOW_MIGRATE, migrateable);
+ }
+ if (is_managed) {
+ crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_MANAGED, is_managed);
+ }
+
+ // Add instance attributes
+ if (port || server) {
+ xml_sub = create_xml_node(remote, XML_TAG_ATTR_SETS);
+ crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_ATTR_SETS);
+ if (server) {
+ crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_REMOTE_RA_ADDR,
+ server);
+ }
+ if (port) {
+ crm_create_nvpair_xml(xml_sub, NULL, "port", port);
+ }
+ }
+
+ // Add operations
+ xml_sub = create_xml_node(remote, "operations");
+ crm_create_op_xml(xml_sub, uname, "monitor", "30s", "30s");
+ if (start_timeout) {
+ crm_create_op_xml(xml_sub, uname, "start", "0", start_timeout);
+ }
+ return remote;
+}
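+
+/* For reference, a hypothetical call such as
+ *
+ *   pe_create_remote_xml(parent, "remote1", NULL, NULL, NULL,
+ *                        "60s", "192.168.122.10", NULL);
+ *
+ * yields roughly the following (generated ids elided, nvpairs abbreviated):
+ *
+ *   <primitive id="remote1" class="ocf" provider="pacemaker" type="remote">
+ *     <meta_attributes> ...internal-resource flag... </meta_attributes>
+ *     <instance_attributes> ...server address... </instance_attributes>
+ *     <operations>
+ *       <op name="monitor" interval="30s" timeout="30s"/>
+ *       <op name="start" interval="0" timeout="60s"/>
+ *     </operations>
+ *   </primitive>
+ */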
+
+// History entry to be checked for fail count clearing
+struct check_op {
+ const xmlNode *rsc_op; // History entry XML
+ pe_resource_t *rsc; // Known resource corresponding to history entry
+ pe_node_t *node; // Known node corresponding to history entry
+ enum pe_check_parameters check_type; // What needs checking
+};
+
+void
+pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
+ pe_node_t *node, enum pe_check_parameters flag,
+ pe_working_set_t *data_set)
+{
+ struct check_op *check_op = NULL;
+
+ CRM_CHECK(data_set && rsc_op && rsc && node, return);
+
+ check_op = calloc(1, sizeof(struct check_op));
+ CRM_ASSERT(check_op != NULL);
+
+ crm_trace("Deferring checks of %s until after allocation", ID(rsc_op));
+ check_op->rsc_op = rsc_op;
+ check_op->rsc = rsc;
+ check_op->node = node;
+ check_op->check_type = flag;
+ data_set->param_check = g_list_prepend(data_set->param_check, check_op);
+}
+
+/*!
+ * \internal
+ * \brief Call a function for each action to be checked for addr substitution
+ *
+ * \param[in,out] data_set Working set for cluster
+ * \param[in] cb Function to be called
+ */
+void
+pe__foreach_param_check(pe_working_set_t *data_set,
+ void (*cb)(pe_resource_t*, pe_node_t*, const xmlNode*,
+ enum pe_check_parameters))
+{
+ CRM_CHECK(data_set && cb, return);
+
+ for (GList *item = data_set->param_check; item != NULL; item = item->next) {
+ struct check_op *check_op = item->data;
+
+ cb(check_op->rsc, check_op->node, check_op->rsc_op,
+ check_op->check_type);
+ }
+}
+
+void
+pe__free_param_checks(pe_working_set_t *data_set)
+{
+ if (data_set && data_set->param_check) {
+ g_list_free_full(data_set->param_check, free);
+ data_set->param_check = NULL;
+ }
+}
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
new file mode 100644
index 0000000..7021d3c
--- /dev/null
+++ b/lib/pengine/rules.c
@@ -0,0 +1,1316 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h>
+
+#include <glib.h>
+
+#include <crm/pengine/rules.h>
+#include <crm/pengine/rules_internal.h>
+#include <crm/pengine/internal.h>
+
+#include <sys/types.h>
+#include <regex.h>
+#include <ctype.h>
+
+CRM_TRACE_INIT_DATA(pe_rules);
+
+/*!
+ * \brief Evaluate any rules contained by given XML element
+ *
+ * \param[in,out] ruleset      XML element to check for rules
+ * \param[in] node_hash Node attributes to use to evaluate expressions
+ * \param[in] now Time to use when evaluating expressions
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ *
+ * \return TRUE if no rules are present, or any rule present is in effect, else FALSE
+ */
+gboolean
+pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
+ crm_time_t *next_change)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe_eval_rules(ruleset, &rule_data, next_change);
+}
+
+gboolean
+pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
+ crm_time_t *now, crm_time_t *next_change,
+ pe_match_data_t *match_data)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = role,
+ .now = now,
+ .match_data = match_data,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe_eval_expr(rule, &rule_data, next_change);
+}
+
+/*!
+ * \brief Evaluate one rule subelement (pass/fail)
+ *
+ * A rule element may contain another rule, a node attribute expression, or a
+ * date expression. Given any one of those, evaluate it and return whether it
+ * passed.
+ *
+ * \param[in,out] expr Rule subelement XML
+ * \param[in] node_hash Node attributes to use when evaluating expression
+ * \param[in] role Resource role to use when evaluating expression
+ * \param[in] now Time to use when evaluating expression
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ * \param[in] match_data If not NULL, resource back-references and params
+ *
+ * \return TRUE if expression is in effect under given conditions, else FALSE
+ */
+gboolean
+pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
+ crm_time_t *now, crm_time_t *next_change,
+ pe_match_data_t *match_data)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = role,
+ .now = now,
+ .match_data = match_data,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ return pe_eval_subexpr(expr, &rule_data, next_change);
+}
+
+enum expression_type
+find_expression_type(xmlNode * expr)
+{
+ const char *tag = NULL;
+ const char *attr = NULL;
+
+ attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
+ tag = crm_element_name(expr);
+
+ if (pcmk__str_eq(tag, PCMK_XE_DATE_EXPRESSION, pcmk__str_none)) {
+ return time_expr;
+
+ } else if (pcmk__str_eq(tag, PCMK_XE_RSC_EXPRESSION, pcmk__str_none)) {
+ return rsc_expr;
+
+ } else if (pcmk__str_eq(tag, PCMK_XE_OP_EXPRESSION, pcmk__str_none)) {
+ return op_expr;
+
+ } else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_none)) {
+ return nested_rule;
+
+ } else if (!pcmk__str_eq(tag, XML_TAG_EXPRESSION, pcmk__str_none)) {
+ return not_expr;
+
+ } else if (pcmk__str_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
+ return loc_expr;
+
+ } else if (pcmk__str_eq(attr, CRM_ATTR_ROLE, pcmk__str_none)) {
+ return role_expr;
+ }
+
+ return attr_expr;
+}
+
+/* As per the nethack rules:
+ *
+ * moon period = 29.53058 days ~= 30, year = 365.2422 days
+ * days moon phase advances on first day of year compared to preceding year
+ * = 365.2422 - 12*29.53058 ~= 11
+ * years in Metonic cycle (time until same phases fall on the same days of
+ * the month) = 18.6 ~= 19
+ * moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30
+ * (29 as initial condition)
+ * current phase in days = first day phase + days elapsed in year
+ * 6 moons ~= 177 days
+ * 177 ~= 8 reported phases * 22
+ * + 11/22 for rounding
+ *
+ * 0-7, with 0: new, 4: full
+ */
+
+static int
+phase_of_the_moon(const crm_time_t *now)
+{
+ uint32_t epact, diy, goldn;
+ uint32_t y;
+
+ crm_time_get_ordinal(now, &y, &diy);
+
+ goldn = (y % 19) + 1;
+ epact = (11 * goldn + 18) % 30;
+ if ((epact == 25 && goldn > 11) || epact == 24)
+ epact++;
+
+ return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7);
+}
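+
+/* Worked example (illustrative only): for 2023-01-01, y = 2023 and diy = 1,
+ * so goldn = (2023 % 19) + 1 = 10 and epact = (11 * 10 + 18) % 30 = 8.
+ * Neither adjustment applies, so the result is
+ * ((((1 + 8) * 6 + 11) % 177) / 22) & 7 = 2, on the 0-7 scale where
+ * 0 is new and 4 is full.
+ */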
+
+static int
+check_one(const xmlNode *cron_spec, const char *xml_field, uint32_t time_field)
+{
+ int rc = pcmk_rc_undetermined;
+ const char *value = crm_element_value(cron_spec, xml_field);
+ long long low, high;
+
+ if (value == NULL) {
+        /* Return pcmk_rc_undetermined if the field is missing. */
+ goto bail;
+ }
+
+ if (pcmk__parse_ll_range(value, &low, &high) != pcmk_rc_ok) {
+ goto bail;
+ } else if (low == high) {
+ /* A single number was given, not a range. */
+ if (time_field < low) {
+ rc = pcmk_rc_before_range;
+ } else if (time_field > high) {
+ rc = pcmk_rc_after_range;
+ } else {
+ rc = pcmk_rc_within_range;
+ }
+ } else if (low != -1 && high != -1) {
+ /* This is a range with both bounds. */
+ if (time_field < low) {
+ rc = pcmk_rc_before_range;
+ } else if (time_field > high) {
+ rc = pcmk_rc_after_range;
+ } else {
+ rc = pcmk_rc_within_range;
+ }
+ } else if (low == -1) {
+ /* This is a range with no starting value. */
+ rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range;
+ } else if (high == -1) {
+ /* This is a range with no ending value. */
+ rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range;
+ }
+
+bail:
+ if (rc == pcmk_rc_within_range) {
+ crm_debug("Condition '%s' in %s: passed", value, xml_field);
+ } else {
+ crm_debug("Condition '%s' in %s: failed", value, xml_field);
+ }
+
+ return rc;
+}
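+
+/* As a sketch of the semantics: with a date_spec field of hours="9-17",
+ * check_one() returns pcmk_rc_before_range at 08:00, pcmk_rc_within_range
+ * at 12:00, and pcmk_rc_after_range at 18:00. A bare hours="9" behaves
+ * like "9-9", while "9-" and "-17" leave the high or low bound open.
+ */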
+
+static gboolean
+check_passes(int rc) {
+ /* _within_range is obvious. _undetermined is a pass because
+ * this is the return value if a field is not given. In this
+ * case, we just want to ignore it and check other fields to
+ * see if they place some restriction on what can pass.
+ */
+ return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined;
+}
+
+#define CHECK_ONE(spec, name, var) do { \
+ int subpart_rc = check_one(spec, name, var); \
+ if (check_passes(subpart_rc) == FALSE) { \
+ return subpart_rc; \
+ } \
+} while (0)
+
+int
+pe_cron_range_satisfied(const crm_time_t *now, const xmlNode *cron_spec)
+{
+ uint32_t h, m, s, y, d, w;
+
+ CRM_CHECK(now != NULL, return pcmk_rc_op_unsatisfied);
+
+ crm_time_get_gregorian(now, &y, &m, &d);
+ CHECK_ONE(cron_spec, "years", y);
+ CHECK_ONE(cron_spec, "months", m);
+ CHECK_ONE(cron_spec, "monthdays", d);
+
+ crm_time_get_timeofday(now, &h, &m, &s);
+ CHECK_ONE(cron_spec, "hours", h);
+ CHECK_ONE(cron_spec, "minutes", m);
+ CHECK_ONE(cron_spec, "seconds", s);
+
+ crm_time_get_ordinal(now, &y, &d);
+ CHECK_ONE(cron_spec, "yeardays", d);
+
+ crm_time_get_isoweek(now, &y, &w, &d);
+ CHECK_ONE(cron_spec, "weekyears", y);
+ CHECK_ONE(cron_spec, "weeks", w);
+ CHECK_ONE(cron_spec, "weekdays", d);
+
+ CHECK_ONE(cron_spec, "moon", phase_of_the_moon(now));
+ if (crm_element_value(cron_spec, "moon") != NULL) {
+ pcmk__config_warn("Support for 'moon' in date_spec elements "
+ "(such as %s) is deprecated and will be removed "
+ "in a future release of Pacemaker", ID(cron_spec));
+ }
+
+ /* If we get here, either no fields were specified (which is success), or all
+ * the fields that were specified had their conditions met (which is also a
+ * success). Thus, the result is success.
+ */
+ return pcmk_rc_ok;
+}
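+
+/* A minimal example of the XML this evaluates (illustrative only):
+ *
+ *   <date_spec id="spec-business-hours" hours="9-16" weekdays="1-5"/>
+ *
+ * passes only Monday through Friday from 09:00:00 to 16:59:59, because
+ * every field that is specified must independently match "now".
+ */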
+
+static void
+update_field(crm_time_t *t, const xmlNode *xml, const char *attr,
+ void (*time_fn)(crm_time_t *, int))
+{
+ long long value;
+
+ if ((pcmk__scan_ll(crm_element_value(xml, attr), &value, 0LL) == pcmk_rc_ok)
+ && (value != 0LL) && (value >= INT_MIN) && (value <= INT_MAX)) {
+ time_fn(t, (int) value);
+ }
+}
+
+static crm_time_t *
+parse_xml_duration(const crm_time_t *start, const xmlNode *duration_spec)
+{
+ crm_time_t *end = pcmk_copy_time(start);
+
+ update_field(end, duration_spec, "years", crm_time_add_years);
+ update_field(end, duration_spec, "months", crm_time_add_months);
+ update_field(end, duration_spec, "weeks", crm_time_add_weeks);
+ update_field(end, duration_spec, "days", crm_time_add_days);
+ update_field(end, duration_spec, "hours", crm_time_add_hours);
+ update_field(end, duration_spec, "minutes", crm_time_add_minutes);
+ update_field(end, duration_spec, "seconds", crm_time_add_seconds);
+
+ return end;
+}
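+
+/* Illustrative only: given a start of 2023-01-15T00:00:00Z and a duration
+ * element such as
+ *
+ *   <duration id="dur1" months="1" hours="12"/>
+ *
+ * each named field is added in turn, so the computed end is
+ * 2023-02-15T12:00:00Z. Fields that are absent or zero are skipped by
+ * update_field().
+ */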
+
+// Set next_change to t if t is earlier
+static void
+crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t)
+{
+ if ((next_change != NULL) && (t != NULL)) {
+ if (!crm_time_is_defined(next_change)
+ || (crm_time_compare(t, next_change) < 0)) {
+ crm_time_set(next_change, t);
+ }
+ }
+}
+
+// Information about a block of nvpair elements
+typedef struct sorted_set_s {
+ int score; // This block's score for sorting
+ const char *name; // This block's ID
+ const char *special_name; // ID that should sort first
+ xmlNode *attr_set; // This block
+} sorted_set_t;
+
+static gint
+sort_pairs(gconstpointer a, gconstpointer b)
+{
+ const sorted_set_t *pair_a = a;
+ const sorted_set_t *pair_b = b;
+
+ if (a == NULL && b == NULL) {
+ return 0;
+ } else if (a == NULL) {
+ return 1;
+ } else if (b == NULL) {
+ return -1;
+ }
+
+ if (pcmk__str_eq(pair_a->name, pair_a->special_name, pcmk__str_casei)) {
+ return -1;
+
+ } else if (pcmk__str_eq(pair_b->name, pair_a->special_name, pcmk__str_casei)) {
+ return 1;
+ }
+
+ if (pair_a->score < pair_b->score) {
+ return 1;
+ } else if (pair_a->score > pair_b->score) {
+ return -1;
+ }
+ return 0;
+}
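+
+/* For example, with special_name "cluster-options", blocks scored
+ * { a = 10, b = 20, cluster-options = 0 } sort as: cluster-options first,
+ * then b (20), then a (10). The special block always sorts to the front,
+ * and the remainder descend by score.
+ */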
+
+static void
+populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlNode * top)
+{
+ const char *name = NULL;
+ const char *value = NULL;
+ const char *old_value = NULL;
+ xmlNode *list = nvpair_list;
+ xmlNode *an_attr = NULL;
+
+ name = crm_element_name(list->children);
+ if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
+ list = list->children;
+ }
+
+ for (an_attr = pcmk__xe_first_child(list); an_attr != NULL;
+ an_attr = pcmk__xe_next(an_attr)) {
+
+ if (pcmk__str_eq((const char *)an_attr->name, XML_CIB_TAG_NVPAIR, pcmk__str_none)) {
+ xmlNode *ref_nvpair = expand_idref(an_attr, top);
+
+ name = crm_element_value(an_attr, XML_NVPAIR_ATTR_NAME);
+ if (name == NULL) {
+ name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME);
+ }
+
+ value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE);
+ if (value == NULL) {
+ value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE);
+ }
+
+ if (name == NULL || value == NULL) {
+ continue;
+ }
+
+ old_value = g_hash_table_lookup(hash, name);
+
+ if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
+ if (old_value) {
+ crm_trace("Letting %s default (removing explicit value \"%s\")",
+ name, value);
+ g_hash_table_remove(hash, name);
+ }
+ continue;
+
+ } else if (old_value == NULL) {
+ crm_trace("Setting %s=\"%s\"", name, value);
+ g_hash_table_insert(hash, strdup(name), strdup(value));
+
+ } else if (overwrite) {
+ crm_trace("Setting %s=\"%s\" (overwriting old value \"%s\")",
+ name, value, old_value);
+ g_hash_table_replace(hash, strdup(name), strdup(value));
+ }
+ }
+ }
+}
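+
+/* For example (names hypothetical): if the hash already maps
+ * stonith-enabled -> "false" and a later nvpair sets
+ * stonith-enabled="#default", the entry is removed so the built-in default
+ * applies again. With overwrite FALSE, a duplicate name is ignored rather
+ * than replacing the first value seen.
+ */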
+
+typedef struct unpack_data_s {
+ gboolean overwrite;
+ void *hash;
+ crm_time_t *next_change;
+ const pe_rule_eval_data_t *rule_data;
+ xmlNode *top;
+} unpack_data_t;
+
+static void
+unpack_attr_set(gpointer data, gpointer user_data)
+{
+ sorted_set_t *pair = data;
+ unpack_data_t *unpack_data = user_data;
+
+ if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data,
+ unpack_data->next_change)) {
+ return;
+ }
+
+ crm_trace("Adding attributes from %s (score %d) %s overwrite",
+ pair->name, pair->score,
+ (unpack_data->overwrite? "with" : "without"));
+ populate_hash(pair->attr_set, unpack_data->hash, unpack_data->overwrite, unpack_data->top);
+}
+
+/*!
+ * \internal
+ * \brief Create a sorted list of nvpair blocks
+ *
+ * \param[in,out] top XML document root (used to expand id-ref's)
+ * \param[in] xml_obj XML element containing blocks of nvpair elements
+ * \param[in] set_name If not NULL, only get blocks of this element
+ * \param[in] always_first If not NULL, sort block with this ID as first
+ *
+ * \return List of sorted_set_t entries for nvpair blocks
+ */
+static GList *
+make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
+ const char *always_first)
+{
+ GList *unsorted = NULL;
+
+ if (xml_obj == NULL) {
+ return NULL;
+ }
+ for (xmlNode *attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
+ attr_set = pcmk__xe_next(attr_set)) {
+
+ if (pcmk__str_eq(set_name, (const char *) attr_set->name,
+ pcmk__str_null_matches)) {
+ const char *score = NULL;
+ sorted_set_t *pair = NULL;
+ xmlNode *expanded_attr_set = expand_idref(attr_set, top);
+
+ if (expanded_attr_set == NULL) {
+ // Schema (if not "none") prevents this
+ continue;
+ }
+
+ pair = calloc(1, sizeof(sorted_set_t));
+ pair->name = ID(expanded_attr_set);
+ pair->special_name = always_first;
+ pair->attr_set = expanded_attr_set;
+
+ score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE);
+ pair->score = char2score(score);
+
+ unsorted = g_list_prepend(unsorted, pair);
+ }
+ }
+ return g_list_sort(unsorted, sort_pairs);
+}
+
+/*!
+ * \brief Extract nvpair blocks contained by an XML element into a hash table
+ *
+ * \param[in,out] top XML document root (used to expand id-ref's)
+ * \param[in] xml_obj XML element containing blocks of nvpair elements
+ * \param[in] set_name If not NULL, only use blocks of this element
+ * \param[in] rule_data Matching parameters to use when unpacking
+ * \param[out] hash Where to store extracted name/value pairs
+ * \param[in] always_first If not NULL, process block with this ID first
+ * \param[in] overwrite Whether to replace existing values with same name
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ */
+void
+pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
+ const pe_rule_eval_data_t *rule_data, GHashTable *hash,
+ const char *always_first, gboolean overwrite,
+ crm_time_t *next_change)
+{
+ GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
+
+ if (pairs) {
+ unpack_data_t data = {
+ .hash = hash,
+ .overwrite = overwrite,
+ .next_change = next_change,
+ .top = top,
+ .rule_data = rule_data
+ };
+
+ g_list_foreach(pairs, unpack_attr_set, &data);
+ g_list_free_full(pairs, free);
+ }
+}
+
+/*!
+ * \brief Extract nvpair blocks contained by an XML element into a hash table
+ *
+ * \param[in,out] top XML document root (used to expand id-ref's)
+ * \param[in] xml_obj XML element containing blocks of nvpair elements
+ * \param[in] set_name Element name to identify nvpair blocks
+ * \param[in] node_hash Node attributes to use when evaluating rules
+ * \param[out] hash Where to store extracted name/value pairs
+ * \param[in] always_first If not NULL, process block with this ID first
+ * \param[in] overwrite Whether to replace existing values with same name
+ * \param[in] now Time to use when evaluating rules
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ */
+void
+pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
+ GHashTable *node_hash, GHashTable *hash,
+ const char *always_first, gboolean overwrite,
+ crm_time_t *now, crm_time_t *next_change)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash,
+ always_first, overwrite, next_change);
+}
+
+/*!
+ * \brief Expand any regular expression submatches (%0-%9) in a string
+ *
+ * \param[in] string String possibly containing submatch variables
+ * \param[in] match_data If not NULL, regular expression matches
+ *
+ * \return Newly allocated string identical to \p string with submatches
+ * expanded, or NULL if there were no matches
+ */
+char *
+pe_expand_re_matches(const char *string, const pe_re_match_data_t *match_data)
+{
+ size_t len = 0;
+ int i;
+ const char *p, *last_match_index;
+ char *p_dst, *result = NULL;
+
+ if (pcmk__str_empty(string) || !match_data) {
+ return NULL;
+ }
+
+ p = last_match_index = string;
+
+ while (*p) {
+ if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
+ i = *(p + 1) - '0';
+ if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
+ match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
+ len += p - last_match_index + (match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so);
+ last_match_index = p + 2;
+ }
+ p++;
+ }
+ p++;
+ }
+ len += p - last_match_index + 1;
+
+ /* FIXME: Excessive? */
+ if (len - 1 <= 0) {
+ return NULL;
+ }
+
+ p_dst = result = calloc(1, len);
+ p = string;
+
+ while (*p) {
+ if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
+ i = *(p + 1) - '0';
+ if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
+ match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
+ /* rm_eo can be equal to rm_so, but then there is nothing to do */
+ int match_len = match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so;
+ memcpy(p_dst, match_data->string + match_data->pmatch[i].rm_so, match_len);
+ p_dst += match_len;
+ }
+ p++;
+ } else {
+ *(p_dst) = *(p);
+ p_dst++;
+ }
+ p++;
+ }
+
+ return result;
+}
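+
+/* A worked sketch of submatch expansion (hypothetical pattern and input,
+ * <regex.h> assumed): after matching "ip-192" against "ip-([0-9]+)",
+ * pmatch[1] covers "192", so expanding "%1-suffix" yields "192-suffix":
+ *
+ *     regex_t re;
+ *     regmatch_t pmatch[10];
+ *
+ *     regcomp(&re, "ip-([0-9]+)", REG_EXTENDED);
+ *     regexec(&re, "ip-192", 10, pmatch, 0);
+ *
+ *     pe_re_match_data_t md = {
+ *         .string = (char *) "ip-192", .nregs = 10, .pmatch = pmatch,
+ *     };
+ *     char *expanded = pe_expand_re_matches("%1-suffix", &md); // "192-suffix"
+ *
+ *     free(expanded);
+ *     regfree(&re);
+ */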
+
+/*!
+ * \brief Evaluate rules
+ *
+ * \param[in,out] ruleset XML possibly containing rule sub-elements
+ * \param[in]     rule_data    Matching parameters to check against rules
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ *
+ * \return TRUE if there are no rules or any rule passes, otherwise FALSE
+ */
+gboolean
+pe_eval_rules(xmlNode *ruleset, const pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change)
+{
+ // If there are no rules, pass by default
+ gboolean ruleset_default = TRUE;
+
+ for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
+ rule != NULL; rule = crm_next_same_xml(rule)) {
+
+ ruleset_default = FALSE;
+ if (pe_eval_expr(rule, rule_data, next_change)) {
+ /* Only the deprecated "lifetime" element of location constraints
+ * may contain more than one rule at the top level -- the schema
+ * limits a block of nvpairs to a single top-level rule. So, this
+ * effectively means that a lifetime is active if any rule it
+ * contains is active.
+ */
+ return TRUE;
+ }
+ }
+
+ return ruleset_default;
+}
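+
+/* A minimal sketch of evaluating a ruleset (hypothetical XML and caller):
+ *
+ *     xmlNode *ruleset = string2xml(
+ *         "<rsc_location id=\"loc-1\">"
+ *         " <rule id=\"rule-1\" boolean-op=\"and\">"
+ *         "  <date_expression id=\"ex-1\" operation=\"gt\" start=\"2023-01-01\"/>"
+ *         " </rule>"
+ *         "</rsc_location>");
+ *     pe_rule_eval_data_t rule_data = {
+ *         .node_hash = NULL, .role = RSC_ROLE_UNKNOWN,
+ *         .now = crm_time_new(NULL),
+ *         .match_data = NULL, .rsc_data = NULL, .op_data = NULL,
+ *     };
+ *
+ *     gboolean active = pe_eval_rules(ruleset, &rule_data, NULL);
+ *
+ *     crm_time_free(rule_data.now);
+ *     free_xml(ruleset);
+ */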
+
+/*!
+ * \brief Evaluate all of a rule's expressions
+ *
+ * \param[in,out] rule XML containing a rule definition or its id-ref
+ * \param[in] rule_data Matching parameters to check against rule
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ *
+ * \return TRUE if \p rule_data passes \p rule, otherwise FALSE
+ */
+gboolean
+pe_eval_expr(xmlNode *rule, const pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change)
+{
+ xmlNode *expr = NULL;
+ gboolean test = TRUE;
+ gboolean empty = TRUE;
+ gboolean passed = TRUE;
+ gboolean do_and = TRUE;
+ const char *value = NULL;
+
+ rule = expand_idref(rule, NULL);
+ value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
+ if (pcmk__str_eq(value, "or", pcmk__str_casei)) {
+ do_and = FALSE;
+ passed = FALSE;
+ }
+
+ crm_trace("Testing rule %s", ID(rule));
+ for (expr = pcmk__xe_first_child(rule); expr != NULL;
+ expr = pcmk__xe_next(expr)) {
+
+ test = pe_eval_subexpr(expr, rule_data, next_change);
+ empty = FALSE;
+
+ if (test && do_and == FALSE) {
+ crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
+ return TRUE;
+
+ } else if (test == FALSE && do_and) {
+ crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
+ return FALSE;
+ }
+ }
+
+ if (empty) {
+ crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
+ }
+
+ crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
+ return passed;
+}
+
+/*!
+ * \brief Evaluate a single rule expression, including any subexpressions
+ *
+ * \param[in,out] expr XML containing a rule expression
+ * \param[in] rule_data Matching parameters to check against expression
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ *
+ * \return TRUE if \p rule_data passes \p expr, otherwise FALSE
+ */
+gboolean
+pe_eval_subexpr(xmlNode *expr, const pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change)
+{
+ gboolean accept = FALSE;
+ const char *uname = NULL;
+
+ switch (find_expression_type(expr)) {
+ case nested_rule:
+ accept = pe_eval_expr(expr, rule_data, next_change);
+ break;
+ case attr_expr:
+ case loc_expr:
+ /* these expressions can never succeed if there is
+ * no node to compare with
+ */
+ if (rule_data->node_hash != NULL) {
+ accept = pe__eval_attr_expr(expr, rule_data);
+ }
+ break;
+
+ case time_expr:
+ switch (pe__eval_date_expr(expr, rule_data, next_change)) {
+ case pcmk_rc_within_range:
+ case pcmk_rc_ok:
+ accept = TRUE;
+ break;
+
+ default:
+ accept = FALSE;
+ break;
+ }
+ break;
+
+ case role_expr:
+ accept = pe__eval_role_expr(expr, rule_data);
+ break;
+
+ case rsc_expr:
+ accept = pe__eval_rsc_expr(expr, rule_data);
+ break;
+
+ case op_expr:
+ accept = pe__eval_op_expr(expr, rule_data);
+ break;
+
+ default:
+ CRM_CHECK(FALSE /* bad type */ , return FALSE);
+ accept = FALSE;
+ }
+ if (rule_data->node_hash) {
+ uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME);
+ }
+
+ crm_trace("Expression %s %s on %s",
+ ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
+ return accept;
+}
+
+/*!
+ * \internal
+ * \brief Compare two values in a rule's node attribute expression
+ *
+ * \param[in] l_val Value on left-hand side of comparison
+ * \param[in] r_val Value on right-hand side of comparison
+ * \param[in] type How to interpret the values (allowed values:
+ * \c "string", \c "integer", \c "number",
+ * \c "version", \c NULL)
+ * \param[in] op Type of comparison
+ *
+ * \return -1 if <tt>(l_val < r_val)</tt>,
+ * 0 if <tt>(l_val == r_val)</tt>,
+ * 1 if <tt>(l_val > r_val)</tt>
+ */
+static int
+compare_attr_expr_vals(const char *l_val, const char *r_val, const char *type,
+ const char *op)
+{
+ int cmp = 0;
+
+ if (l_val != NULL && r_val != NULL) {
+ if (type == NULL) {
+ if (pcmk__strcase_any_of(op, "lt", "lte", "gt", "gte", NULL)) {
+ if (pcmk__char_in_any_str('.', l_val, r_val, NULL)) {
+ type = "number";
+ } else {
+ type = "integer";
+ }
+
+ } else {
+ type = "string";
+ }
+ crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
+ }
+
+ if (pcmk__str_eq(type, "string", pcmk__str_casei)) {
+ cmp = strcasecmp(l_val, r_val);
+
+ } else if (pcmk__str_eq(type, "integer", pcmk__str_casei)) {
+ long long l_val_num;
+ int rc1 = pcmk__scan_ll(l_val, &l_val_num, 0LL);
+
+ long long r_val_num;
+ int rc2 = pcmk__scan_ll(r_val, &r_val_num, 0LL);
+
+ if ((rc1 == pcmk_rc_ok) && (rc2 == pcmk_rc_ok)) {
+ if (l_val_num < r_val_num) {
+ cmp = -1;
+ } else if (l_val_num > r_val_num) {
+ cmp = 1;
+ } else {
+ cmp = 0;
+ }
+
+ } else {
+ crm_debug("Integer parse error. Comparing %s and %s as strings",
+ l_val, r_val);
+ cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
+ }
+
+ } else if (pcmk__str_eq(type, "number", pcmk__str_casei)) {
+ double l_val_num;
+ double r_val_num;
+
+ int rc1 = pcmk__scan_double(l_val, &l_val_num, NULL, NULL);
+ int rc2 = pcmk__scan_double(r_val, &r_val_num, NULL, NULL);
+
+ if (rc1 == pcmk_rc_ok && rc2 == pcmk_rc_ok) {
+ if (l_val_num < r_val_num) {
+ cmp = -1;
+ } else if (l_val_num > r_val_num) {
+ cmp = 1;
+ } else {
+ cmp = 0;
+ }
+
+ } else {
+ crm_debug("Floating-point parse error. Comparing %s and %s as "
+ "strings", l_val, r_val);
+ cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
+ }
+
+ } else if (pcmk__str_eq(type, "version", pcmk__str_casei)) {
+ cmp = compare_version(l_val, r_val);
+
+ }
+
+ } else if (l_val == NULL && r_val == NULL) {
+ cmp = 0;
+ } else if (r_val == NULL) {
+ cmp = 1;
+ } else { // l_val == NULL && r_val != NULL
+ cmp = -1;
+ }
+
+ return cmp;
+}
+
+/*!
+ * \internal
+ * \brief Check whether an attribute expression evaluates to \c true
+ *
+ * \param[in] l_val Value on left-hand side of comparison
+ * \param[in] r_val Value on right-hand side of comparison
+ * \param[in] type How to interpret the values (allowed values:
+ * \c "string", \c "integer", \c "number",
+ * \c "version", \c NULL)
+ * \param[in]  op     Type of comparison
+ *
+ * \return \c true if expression evaluates to \c true, \c false
+ * otherwise
+ */
+static bool
+accept_attr_expr(const char *l_val, const char *r_val, const char *type,
+ const char *op)
+{
+ int cmp;
+
+ if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
+ return (l_val != NULL);
+
+ } else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
+ return (l_val == NULL);
+
+ }
+
+ cmp = compare_attr_expr_vals(l_val, r_val, type, op);
+
+ if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
+ return (cmp == 0);
+
+ } else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
+ return (cmp != 0);
+
+ } else if (l_val == NULL || r_val == NULL) {
+ // The comparison is meaningless from this point on
+ return false;
+
+ } else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
+ return (cmp < 0);
+
+ } else if (pcmk__str_eq(op, "lte", pcmk__str_casei)) {
+ return (cmp <= 0);
+
+ } else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
+ return (cmp > 0);
+
+ } else if (pcmk__str_eq(op, "gte", pcmk__str_casei)) {
+ return (cmp >= 0);
+ }
+
+ return false; // Should never reach this point
+}
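+
+/* Worked examples of the defaulting above (values are hypothetical): with no
+ * explicit type, "lt"/"lte"/"gt"/"gte" comparisons are numeric and everything
+ * else is a string comparison, so:
+ *
+ *     accept_attr_expr("9", "10", NULL, "lt");     // true:  integer 9 < 10
+ *     accept_attr_expr("9", "10", NULL, "eq");     // false: strings differ
+ *     accept_attr_expr("1.5", "1.10", NULL, "gt"); // true:  '.' selects
+ *                                                  // "number", 1.5 > 1.10
+ *     accept_attr_expr("anything", NULL, NULL, "defined"); // true: non-NULL
+ */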
+
+/*!
+ * \internal
+ * \brief Get correct value according to value-source
+ *
+ * \param[in] value value given in rule expression
+ * \param[in] value_source value-source given in rule expressions
+ * \param[in] match_data If not NULL, resource back-references and params
+ */
+static const char *
+expand_value_source(const char *value, const char *value_source,
+ const pe_match_data_t *match_data)
+{
+ GHashTable *table = NULL;
+
+ if (pcmk__str_empty(value)) {
+ return NULL; // value_source is irrelevant
+
+ } else if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) {
+ table = match_data->params;
+
+ } else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) {
+ table = match_data->meta;
+
+ } else { // literal
+ return value;
+ }
+
+ if (table == NULL) {
+ return NULL;
+ }
+ return (const char *) g_hash_table_lookup(table, value);
+}
+
+/*!
+ * \internal
+ * \brief Evaluate a node attribute expression based on #uname, #id, #kind,
+ * or a generic node attribute
+ *
+ * \param[in] expr XML of rule expression
+ * \param[in] rule_data The match_data and node_hash members are used
+ *
+ * \return TRUE if rule_data satisfies the expression, FALSE otherwise
+ */
+gboolean
+pe__eval_attr_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
+{
+ gboolean attr_allocated = FALSE;
+ const char *h_val = NULL;
+
+ const char *op = NULL;
+ const char *type = NULL;
+ const char *attr = NULL;
+ const char *value = NULL;
+ const char *value_source = NULL;
+
+ attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
+ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
+ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
+ type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
+ value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
+
+ if (attr == NULL) {
+ pe_err("Expression %s invalid: " XML_EXPR_ATTR_ATTRIBUTE
+ " not specified", pcmk__s(ID(expr), "without ID"));
+ return FALSE;
+ } else if (op == NULL) {
+ pe_err("Expression %s invalid: " XML_EXPR_ATTR_OPERATION
+ " not specified", pcmk__s(ID(expr), "without ID"));
+ }
+
+ if (rule_data->match_data != NULL) {
+ // Expand any regular expression submatches (%0-%9) in attribute name
+ if (rule_data->match_data->re != NULL) {
+ char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re);
+
+ if (resolved_attr != NULL) {
+ attr = (const char *) resolved_attr;
+ attr_allocated = TRUE;
+ }
+ }
+
+ // Get value appropriate to value-source
+ value = expand_value_source(value, value_source, rule_data->match_data);
+ }
+
+ if (rule_data->node_hash != NULL) {
+ h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr);
+ }
+
+ if (attr_allocated) {
+ free((char *)attr);
+ attr = NULL;
+ }
+
+ return accept_attr_expr(h_val, value, type, op);
+}
+
+/*!
+ * \internal
+ * \brief Evaluate a date_expression
+ *
+ * \param[in] expr XML of rule expression
+ * \param[in] rule_data Only the now member is used
+ * \param[out] next_change If not NULL, set to when evaluation will change
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pe__eval_date_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data,
+ crm_time_t *next_change)
+{
+ crm_time_t *start = NULL;
+ crm_time_t *end = NULL;
+ const char *value = NULL;
+ const char *op = crm_element_value(expr, "operation");
+
+ xmlNode *duration_spec = NULL;
+ xmlNode *date_spec = NULL;
+
+ // "undetermined" will also be returned for parsing errors
+ int rc = pcmk_rc_undetermined;
+
+ crm_trace("Testing expression: %s", ID(expr));
+
+ duration_spec = first_named_child(expr, "duration");
+ date_spec = first_named_child(expr, "date_spec");
+
+ value = crm_element_value(expr, "start");
+ if (value != NULL) {
+ start = crm_time_new(value);
+ }
+ value = crm_element_value(expr, "end");
+ if (value != NULL) {
+ end = crm_time_new(value);
+ }
+
+ if (start != NULL && end == NULL && duration_spec != NULL) {
+ end = parse_xml_duration(start, duration_spec);
+ }
+
+ if (pcmk__str_eq(op, "in_range", pcmk__str_null_matches | pcmk__str_casei)) {
+ if ((start == NULL) && (end == NULL)) {
+ // in_range requires at least one of start or end
+ } else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) {
+ rc = pcmk_rc_before_range;
+ crm_time_set_if_earlier(next_change, start);
+ } else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) {
+ rc = pcmk_rc_after_range;
+ } else {
+ rc = pcmk_rc_within_range;
+ if (end && next_change) {
+ // Evaluation doesn't change until second after end
+ crm_time_add_seconds(end, 1);
+ crm_time_set_if_earlier(next_change, end);
+ }
+ }
+
+ } else if (pcmk__str_eq(op, "date_spec", pcmk__str_casei)) {
+ rc = pe_cron_range_satisfied(rule_data->now, date_spec);
+ // @TODO set next_change appropriately
+
+ } else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
+ if (start == NULL) {
+ // gt requires start
+ } else if (crm_time_compare(rule_data->now, start) > 0) {
+ rc = pcmk_rc_within_range;
+ } else {
+ rc = pcmk_rc_before_range;
+
+ // Evaluation doesn't change until second after start
+ crm_time_add_seconds(start, 1);
+ crm_time_set_if_earlier(next_change, start);
+ }
+
+ } else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
+ if (end == NULL) {
+ // lt requires end
+ } else if (crm_time_compare(rule_data->now, end) < 0) {
+ rc = pcmk_rc_within_range;
+ crm_time_set_if_earlier(next_change, end);
+ } else {
+ rc = pcmk_rc_after_range;
+ }
+ }
+
+ crm_time_free(start);
+ crm_time_free(end);
+ return rc;
+}
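+
+/* A minimal sketch (hypothetical XML): an in_range expression with a start
+ * and a duration is satisfied while "now" falls inside the computed window:
+ *
+ *     xmlNode *expr = string2xml(
+ *         "<date_expression id=\"ex-date\" operation=\"in_range\""
+ *         "                 start=\"2023-01-01\">"
+ *         " <duration id=\"ex-dur\" months=\"6\"/>"
+ *         "</date_expression>");
+ *     pe_rule_eval_data_t rule_data = {
+ *         .node_hash = NULL, .role = RSC_ROLE_UNKNOWN,
+ *         .now = crm_time_new("2023-03-01"),
+ *         .match_data = NULL, .rsc_data = NULL, .op_data = NULL,
+ *     };
+ *
+ *     int rc = pe__eval_date_expr(expr, &rule_data, NULL);
+ *     // rc == pcmk_rc_within_range: 2023-03-01 is inside
+ *     // [2023-01-01, 2023-01-01 + 6 months]
+ */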
+
+gboolean
+pe__eval_op_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
+{
+ const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME);
+ const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL);
+ guint interval;
+
+ crm_trace("Testing op_defaults expression: %s", ID(expr));
+
+ if (rule_data->op_data == NULL) {
+ crm_trace("No operations data provided");
+ return FALSE;
+ }
+
+ interval = crm_parse_interval_spec(interval_s);
+ if (interval == 0 && errno != 0) {
+ crm_trace("Could not parse interval: %s", interval_s);
+ return FALSE;
+ }
+
+ if (interval_s != NULL && interval != rule_data->op_data->interval) {
+ crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval);
+ return FALSE;
+ }
+
+ if (!pcmk__str_eq(name, rule_data->op_data->op_name, pcmk__str_none)) {
+ crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name);
+ return FALSE;
+ }
+
+ return TRUE;
+}
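+
+/* A minimal sketch (hypothetical values): an op_expression matches when both
+ * the operation name and the interval agree with the op being configured:
+ *
+ *     xmlNode *expr = string2xml(
+ *         "<op_expression id=\"ex-op\" name=\"monitor\" interval=\"10s\"/>");
+ *     pe_op_eval_data_t op_data = { .op_name = "monitor", .interval = 10000 };
+ *     pe_rule_eval_data_t rule_data = {
+ *         .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = NULL,
+ *         .match_data = NULL, .rsc_data = NULL, .op_data = &op_data,
+ *     };
+ *
+ *     gboolean match = pe__eval_op_expr(expr, &rule_data); // TRUE
+ */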
+
+/*!
+ * \internal
+ * \brief Evaluate a node attribute expression based on #role
+ *
+ * \param[in] expr XML of rule expression
+ * \param[in] rule_data Only the role member is used
+ *
+ * \return TRUE if rule_data->role satisfies the expression, FALSE otherwise
+ */
+gboolean
+pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
+{
+ gboolean accept = FALSE;
+ const char *op = NULL;
+ const char *value = NULL;
+
+ if (rule_data->role == RSC_ROLE_UNKNOWN) {
+ return accept;
+ }
+
+ value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
+ op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
+
+ if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
+ if (rule_data->role > RSC_ROLE_STARTED) {
+ accept = TRUE;
+ }
+
+ } else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
+ if ((rule_data->role > RSC_ROLE_UNKNOWN)
+ && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ accept = TRUE;
+ }
+
+ } else if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
+ if (text2role(value) == rule_data->role) {
+ accept = TRUE;
+ }
+
+ } else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
+ // Test "ne" only with promotable clone roles
+ if ((rule_data->role > RSC_ROLE_UNKNOWN)
+ && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ accept = FALSE;
+
+ } else if (text2role(value) != rule_data->role) {
+ accept = TRUE;
+ }
+ }
+ return accept;
+}
+
+gboolean
+pe__eval_rsc_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
+{
+ const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS);
+ const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER);
+ const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
+
+ crm_trace("Testing rsc_defaults expression: %s", ID(expr));
+
+ if (rule_data->rsc_data == NULL) {
+ crm_trace("No resource data provided");
+ return FALSE;
+ }
+
+ if (class != NULL &&
+ !pcmk__str_eq(class, rule_data->rsc_data->standard, pcmk__str_none)) {
+ crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard);
+ return FALSE;
+ }
+
+ if ((provider == NULL && rule_data->rsc_data->provider != NULL) ||
+ (provider != NULL && rule_data->rsc_data->provider == NULL) ||
+ !pcmk__str_eq(provider, rule_data->rsc_data->provider, pcmk__str_none)) {
+ crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider);
+ return FALSE;
+ }
+
+ if (type != NULL &&
+ !pcmk__str_eq(type, rule_data->rsc_data->agent, pcmk__str_none)) {
+ crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent);
+ return FALSE;
+ }
+
+ return TRUE;
+}
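+
+/* A minimal sketch (hypothetical values): class, provider, and type are each
+ * checked only if present in the expression, except that provider must agree
+ * exactly, including being absent on both sides:
+ *
+ *     xmlNode *expr = string2xml(
+ *         "<rsc_expression id=\"ex-rsc\" class=\"ocf\""
+ *         "                provider=\"heartbeat\" type=\"IPaddr2\"/>");
+ *     pe_rsc_eval_data_t rsc_data = {
+ *         .standard = "ocf", .provider = "heartbeat", .agent = "IPaddr2",
+ *     };
+ *     pe_rule_eval_data_t rule_data = {
+ *         .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = NULL,
+ *         .match_data = NULL, .rsc_data = &rsc_data, .op_data = NULL,
+ *     };
+ *
+ *     gboolean match = pe__eval_rsc_expr(expr, &rule_data); // TRUE
+ */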
+
+// Deprecated functions kept only for backward API compatibility
+// LCOV_EXCL_START
+
+#include <crm/pengine/rules_compat.h>
+
+gboolean
+test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now)
+{
+ return pe_evaluate_rules(ruleset, node_hash, now, NULL);
+}
+
+gboolean
+test_rule(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
+{
+ return pe_test_rule(rule, node_hash, role, now, NULL, NULL);
+}
+
+gboolean
+pe_test_rule_re(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
+{
+ pe_match_data_t match_data = {
+ .re = re_match_data,
+ .params = NULL,
+ .meta = NULL,
+ };
+ return pe_test_rule(rule, node_hash, role, now, NULL, &match_data);
+}
+
+gboolean
+pe_test_rule_full(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
+ crm_time_t *now, pe_match_data_t *match_data)
+{
+ return pe_test_rule(rule, node_hash, role, now, NULL, match_data);
+}
+
+gboolean
+test_expression(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
+{
+ return pe_test_expression(expr, node_hash, role, now, NULL, NULL);
+}
+
+gboolean
+pe_test_expression_re(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
+{
+ pe_match_data_t match_data = {
+ .re = re_match_data,
+ .params = NULL,
+ .meta = NULL,
+ };
+ return pe_test_expression(expr, node_hash, role, now, NULL, &match_data);
+}
+
+gboolean
+pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
+ enum rsc_role_e role, crm_time_t *now,
+ pe_match_data_t *match_data)
+{
+ return pe_test_expression(expr, node_hash, role, now, NULL, match_data);
+}
+
+void
+unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
+ GHashTable *node_hash, GHashTable *hash,
+ const char *always_first, gboolean overwrite,
+ crm_time_t *now)
+{
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_hash,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash, always_first,
+ overwrite, NULL);
+}
+
+// LCOV_EXCL_STOP
+// End deprecated API
diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c
new file mode 100644
index 0000000..073b0c1
--- /dev/null
+++ b/lib/pengine/rules_alerts.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/pengine/rules.h>
+#include <crm/common/alerts_internal.h>
+#include <crm/common/xml_internal.h>
+#include <crm/pengine/rules_internal.h>
+
+/*!
+ * \internal
+ * \brief Unpack an alert's or alert recipient's meta attributes
+ *
+ * \param[in,out] basenode Alert or recipient XML
+ * \param[in,out] entry Where to store unpacked values
+ * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+get_meta_attrs_from_cib(xmlNode *basenode, pcmk__alert_t *entry,
+ guint *max_timeout)
+{
+ GHashTable *config_hash = pcmk__strkey_table(free, free);
+ crm_time_t *now = crm_time_new(NULL);
+ const char *value = NULL;
+ int rc = pcmk_rc_ok;
+
+ pe_unpack_nvpairs(basenode, basenode, XML_TAG_META_SETS, NULL, config_hash,
+ NULL, FALSE, now, NULL);
+ crm_time_free(now);
+
+ value = g_hash_table_lookup(config_hash, PCMK_META_ENABLED);
+ if ((value != NULL) && !crm_is_true(value)) {
+ // No need to continue unpacking
+ rc = pcmk_rc_disabled;
+ goto done;
+ }
+
+ value = g_hash_table_lookup(config_hash, XML_ALERT_ATTR_TIMEOUT);
+ if (value) {
+ entry->timeout = crm_get_msec(value);
+ if (entry->timeout <= 0) {
+ if (entry->timeout == 0) {
+ crm_trace("Alert %s uses default timeout of %dmsec",
+ entry->id, PCMK__ALERT_DEFAULT_TIMEOUT_MS);
+ } else {
+ crm_warn("Alert %s has invalid timeout value '%s', using default %dmsec",
+ entry->id, (char*)value, PCMK__ALERT_DEFAULT_TIMEOUT_MS);
+ }
+ entry->timeout = PCMK__ALERT_DEFAULT_TIMEOUT_MS;
+ } else {
+ crm_trace("Alert %s uses timeout of %dmsec",
+ entry->id, entry->timeout);
+ }
+ if (entry->timeout > *max_timeout) {
+ *max_timeout = entry->timeout;
+ }
+ }
+ value = g_hash_table_lookup(config_hash, XML_ALERT_ATTR_TSTAMP_FORMAT);
+ if (value) {
+        /* hard to do any checks here, as almost anything
+         * can be a valid time-format-string
+         */
+ entry->tstamp_format = strdup(value);
+ crm_trace("Alert %s uses timestamp format '%s'",
+ entry->id, entry->tstamp_format);
+ }
+
+done:
+ g_hash_table_destroy(config_hash);
+ return rc;
+}
+
+static void
+get_envvars_from_cib(xmlNode *basenode, pcmk__alert_t *entry)
+{
+ xmlNode *child;
+
+ if ((basenode == NULL) || (entry == NULL)) {
+ return;
+ }
+
+ child = first_named_child(basenode, XML_TAG_ATTR_SETS);
+ if (child == NULL) {
+ return;
+ }
+
+ if (entry->envvars == NULL) {
+ entry->envvars = pcmk__strkey_table(free, free);
+ }
+
+ for (child = first_named_child(child, XML_CIB_TAG_NVPAIR); child != NULL;
+ child = crm_next_same_xml(child)) {
+
+ const char *name = crm_element_value(child, XML_NVPAIR_ATTR_NAME);
+ const char *value = crm_element_value(child, XML_NVPAIR_ATTR_VALUE);
+
+ if (value == NULL) {
+ value = "";
+ }
+ g_hash_table_insert(entry->envvars, strdup(name), strdup(value));
+ crm_trace("Alert %s: added environment variable %s='%s'",
+ entry->id, name, value);
+ }
+}
+
+static void
+unpack_alert_filter(xmlNode *basenode, pcmk__alert_t *entry)
+{
+ xmlNode *select = first_named_child(basenode, XML_CIB_TAG_ALERT_SELECT);
+ xmlNode *event_type = NULL;
+ uint32_t flags = pcmk__alert_none;
+
+ for (event_type = pcmk__xe_first_child(select); event_type != NULL;
+ event_type = pcmk__xe_next(event_type)) {
+
+ const char *tagname = crm_element_name(event_type);
+
+ if (tagname == NULL) {
+ continue;
+
+ } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_FENCING)) {
+ flags |= pcmk__alert_fencing;
+
+ } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_NODES)) {
+ flags |= pcmk__alert_node;
+
+ } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_RESOURCES)) {
+ flags |= pcmk__alert_resource;
+
+ } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
+ xmlNode *attr;
+ const char *attr_name;
+ int nattrs = 0;
+
+ flags |= pcmk__alert_attribute;
+ for (attr = first_named_child(event_type, XML_CIB_TAG_ALERT_ATTR);
+ attr != NULL;
+ attr = crm_next_same_xml(attr)) {
+
+ attr_name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
+ if (attr_name) {
+ if (nattrs == 0) {
+ g_strfreev(entry->select_attribute_name);
+ entry->select_attribute_name = NULL;
+ }
+ ++nattrs;
+ entry->select_attribute_name = pcmk__realloc(entry->select_attribute_name,
+ (nattrs + 1) * sizeof(char*));
+ entry->select_attribute_name[nattrs - 1] = strdup(attr_name);
+ entry->select_attribute_name[nattrs] = NULL;
+ }
+ }
+ }
+ }
+
+ if (flags != pcmk__alert_none) {
+ entry->flags = flags;
+ crm_debug("Alert %s receives events: attributes:%s%s%s%s",
+ entry->id,
+ (pcmk_is_set(flags, pcmk__alert_attribute)?
+ (entry->select_attribute_name? "some" : "all") : "none"),
+ (pcmk_is_set(flags, pcmk__alert_fencing)? " fencing" : ""),
+ (pcmk_is_set(flags, pcmk__alert_node)? " nodes" : ""),
+ (pcmk_is_set(flags, pcmk__alert_resource)? " resources" : ""));
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack an alert or an alert recipient
+ *
+ * \param[in,out] alert Alert or recipient XML
+ * \param[in,out] entry Where to store unpacked values
+ * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+unpack_alert(xmlNode *alert, pcmk__alert_t *entry, guint *max_timeout)
+{
+ int rc = pcmk_rc_ok;
+
+ get_envvars_from_cib(alert, entry);
+ rc = get_meta_attrs_from_cib(alert, entry, max_timeout);
+ if (rc == pcmk_rc_ok) {
+ unpack_alert_filter(alert, entry);
+ }
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief Unpack a CIB alerts section
+ *
+ * \param[in] alerts XML of alerts section
+ *
+ * \return List of unpacked alert entries
+ *
+ * \note Unlike most unpack functions, this is not used by the scheduler itself,
+ * but is supplied for use by daemons that need to send alerts.
+ */
+GList *
+pe_unpack_alerts(const xmlNode *alerts)
+{
+ xmlNode *alert;
+ pcmk__alert_t *entry;
+ guint max_timeout = 0;
+ GList *alert_list = NULL;
+
+ if (alerts == NULL) {
+ return alert_list;
+ }
+
+ for (alert = first_named_child(alerts, XML_CIB_TAG_ALERT);
+ alert != NULL; alert = crm_next_same_xml(alert)) {
+
+ xmlNode *recipient;
+ int recipients = 0;
+ const char *alert_id = ID(alert);
+ const char *alert_path = crm_element_value(alert, XML_ALERT_ATTR_PATH);
+
+ /* The schema should enforce this, but to be safe ... */
+ if ((alert_id == NULL) || (alert_path == NULL)) {
+ crm_warn("Ignoring invalid alert without id and path");
+ continue;
+ }
+
+ entry = pcmk__alert_new(alert_id, alert_path);
+
+ if (unpack_alert(alert, entry, &max_timeout) != pcmk_rc_ok) {
+ // Don't allow recipients to override if entire alert is disabled
+ crm_debug("Alert %s is disabled", entry->id);
+ pcmk__free_alert(entry);
+ continue;
+ }
+
+ if (entry->tstamp_format == NULL) {
+ entry->tstamp_format = strdup(PCMK__ALERT_DEFAULT_TSTAMP_FORMAT);
+ }
+
+ crm_debug("Alert %s: path=%s timeout=%dms tstamp-format='%s' %u vars",
+ entry->id, entry->path, entry->timeout, entry->tstamp_format,
+ (entry->envvars? g_hash_table_size(entry->envvars) : 0));
+
+ for (recipient = first_named_child(alert, XML_CIB_TAG_ALERT_RECIPIENT);
+ recipient != NULL; recipient = crm_next_same_xml(recipient)) {
+
+ pcmk__alert_t *recipient_entry = pcmk__dup_alert(entry);
+
+ recipients++;
+ recipient_entry->recipient = strdup(crm_element_value(recipient,
+ XML_ALERT_ATTR_REC_VALUE));
+
+ if (unpack_alert(recipient, recipient_entry,
+ &max_timeout) != pcmk_rc_ok) {
+ crm_debug("Alert %s: recipient %s is disabled",
+ entry->id, recipient_entry->id);
+ pcmk__free_alert(recipient_entry);
+ continue;
+ }
+ alert_list = g_list_prepend(alert_list, recipient_entry);
+ crm_debug("Alert %s has recipient %s with value %s and %d envvars",
+ entry->id, ID(recipient), recipient_entry->recipient,
+ (recipient_entry->envvars?
+ g_hash_table_size(recipient_entry->envvars) : 0));
+ }
+
+ if (recipients == 0) {
+ alert_list = g_list_prepend(alert_list, entry);
+ } else {
+ pcmk__free_alert(entry);
+ }
+ }
+ return alert_list;
+}
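+
+/* A minimal sketch (hypothetical XML) of unpacking an alerts section for a
+ * daemon that sends alerts:
+ *
+ *     xmlNode *alerts = string2xml(
+ *         "<alerts>"
+ *         " <alert id=\"alert-1\" path=\"/usr/share/pacemaker/alerts/alert_smtp.sh\">"
+ *         "  <recipient id=\"alert-1-r1\" value=\"admin@example.com\"/>"
+ *         " </alert>"
+ *         "</alerts>");
+ *     GList *entries = pe_unpack_alerts(alerts);
+ *
+ *     // one pcmk__alert_t per recipient (or per alert without recipients)
+ *     pe_free_alert_list(entries);
+ *     free_xml(alerts);
+ */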
+
+/*!
+ * \internal
+ * \brief Free an alert list generated by pe_unpack_alerts()
+ *
+ * \param[in,out] alert_list Alert list to free
+ */
+void
+pe_free_alert_list(GList *alert_list)
+{
+ if (alert_list) {
+ g_list_free_full(alert_list, (GDestroyNotify) pcmk__free_alert);
+ }
+}
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
new file mode 100644
index 0000000..b1144eb
--- /dev/null
+++ b/lib/pengine/status.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2004-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <sys/param.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+
+#include <glib.h>
+
+#include <crm/pengine/internal.h>
+#include <pe_status_private.h>
+
+/*!
+ * \brief Create a new working set
+ *
+ * \return New, initialized working set on success, else NULL (and set errno)
+ * \note Only pe_working_set_t objects created with this function (as opposed
+ * to statically declared or directly allocated) should be used with the
+ * functions in this library, to allow for future extensions to the
+ * data type. The caller is responsible for freeing the memory with
+ * pe_free_working_set() when the instance is no longer needed.
+ */
+pe_working_set_t *
+pe_new_working_set(void)
+{
+ pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+
+ if (data_set != NULL) {
+ set_working_set_defaults(data_set);
+ }
+ return data_set;
+}
+
+/*!
+ * \brief Free a working set
+ *
+ * \param[in,out] data_set Working set to free
+ */
+void
+pe_free_working_set(pe_working_set_t *data_set)
+{
+ if (data_set != NULL) {
+ pe_reset_working_set(data_set);
+ data_set->priv = NULL;
+ free(data_set);
+ }
+}
+
+/*
+ * Unpack everything
+ * At the end you'll have:
+ * - A list of nodes
+ * - A list of resources (each with any dependencies on other resources)
+ * - A list of constraints between resources and nodes
+ * - A list of constraints between start/stop actions
+ * - A list of nodes that need to be stonith'd
+ * - A list of nodes that need to be shut down
+ * - A list of the possible stop/start actions (without dependencies)
+ */
+gboolean
+cluster_status(pe_working_set_t * data_set)
+{
+ xmlNode *section = NULL;
+
+ if ((data_set == NULL) || (data_set->input == NULL)) {
+ return FALSE;
+ }
+
+ crm_trace("Beginning unpack");
+
+ if (data_set->failed != NULL) {
+ free_xml(data_set->failed);
+ }
+ data_set->failed = create_xml_node(NULL, "failed-ops");
+
+ if (data_set->now == NULL) {
+ data_set->now = crm_time_new(NULL);
+ }
+
+ if (data_set->dc_uuid == NULL) {
+ data_set->dc_uuid = crm_element_value_copy(data_set->input,
+ XML_ATTR_DC_UUID);
+ }
+
+ if (pcmk__xe_attr_is_true(data_set->input, XML_ATTR_HAVE_QUORUM)) {
+ pe__set_working_set_flags(data_set, pe_flag_have_quorum);
+ } else {
+ pe__clear_working_set_flags(data_set, pe_flag_have_quorum);
+ }
+
+ data_set->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
+ data_set->input, LOG_NEVER);
+ data_set->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
+ data_set->input, LOG_NEVER);
+
+ section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, data_set->input,
+ LOG_TRACE);
+ unpack_config(section, data_set);
+
+ if (!pcmk_any_flags_set(data_set->flags,
+ pe_flag_quick_location|pe_flag_have_quorum)
+ && (data_set->no_quorum_policy != no_quorum_ignore)) {
+ crm_warn("Fencing and resource management disabled due to lack of quorum");
+ }
+
+ section = get_xpath_object("//" XML_CIB_TAG_NODES, data_set->input,
+ LOG_TRACE);
+ unpack_nodes(section, data_set);
+
+ section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, data_set->input,
+ LOG_TRACE);
+ if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ unpack_remote_nodes(section, data_set);
+ }
+ unpack_resources(section, data_set);
+
+ section = get_xpath_object("//" XML_CIB_TAG_TAGS, data_set->input,
+ LOG_NEVER);
+ unpack_tags(section, data_set);
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ section = get_xpath_object("//"XML_CIB_TAG_STATUS, data_set->input,
+ LOG_TRACE);
+ unpack_status(section, data_set);
+ }
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_no_counts)) {
+ for (GList *item = data_set->resources; item != NULL;
+ item = item->next) {
+ ((pe_resource_t *) (item->data))->fns->count(item->data);
+ }
+ crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
+ data_set->ninstances, data_set->disabled_resources,
+ data_set->blocked_resources);
+ }
+
+ pe__set_working_set_flags(data_set, pe_flag_have_status);
+ return TRUE;
+}
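+
+/* A minimal sketch of the intended call sequence ("cib_xml" is assumed to be
+ * a CIB obtained by the caller, and is owned by the working set once
+ * assigned):
+ *
+ *     pe_working_set_t *data_set = pe_new_working_set();
+ *
+ *     data_set->input = cib_xml;
+ *     if (cluster_status(data_set)) {
+ *         // data_set->nodes and data_set->resources are now populated
+ *     }
+ *     pe_free_working_set(data_set); // also frees cib_xml via input
+ */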
+
+/*!
+ * \internal
+ * \brief Free a list of pe_resource_t
+ *
+ * \param[in,out] resources List to free
+ *
+ * \note When a working set's resource list is freed, that includes the original
+ * storage for the uname and id of any Pacemaker Remote nodes in the
+ * working set's node list, so take care not to use those afterward.
+ * \todo Refactor pe_node_t to strdup() the node name.
+ */
+static void
+pe_free_resources(GList *resources)
+{
+ pe_resource_t *rsc = NULL;
+ GList *iterator = resources;
+
+ while (iterator != NULL) {
+ rsc = (pe_resource_t *) iterator->data;
+ iterator = iterator->next;
+ rsc->fns->free(rsc);
+ }
+ if (resources != NULL) {
+ g_list_free(resources);
+ }
+}
+
+static void
+pe_free_actions(GList *actions)
+{
+ GList *iterator = actions;
+
+ while (iterator != NULL) {
+ pe_free_action(iterator->data);
+ iterator = iterator->next;
+ }
+ if (actions != NULL) {
+ g_list_free(actions);
+ }
+}
+
+static void
+pe_free_nodes(GList *nodes)
+{
+ for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
+ pe_node_t *node = (pe_node_t *) iterator->data;
+
+ // Shouldn't be possible, but to be safe ...
+ if (node == NULL) {
+ continue;
+ }
+ if (node->details == NULL) {
+ free(node);
+ continue;
+ }
+
+ /* This is called after pe_free_resources(), which means that we can't
+ * use node->details->uname for Pacemaker Remote nodes.
+ */
+ crm_trace("Freeing node %s", (pe__is_guest_or_remote_node(node)?
+ "(guest or remote)" : pe__node_name(node)));
+
+ if (node->details->attrs != NULL) {
+ g_hash_table_destroy(node->details->attrs);
+ }
+ if (node->details->utilization != NULL) {
+ g_hash_table_destroy(node->details->utilization);
+ }
+ if (node->details->digest_cache != NULL) {
+ g_hash_table_destroy(node->details->digest_cache);
+ }
+ g_list_free(node->details->running_rsc);
+ g_list_free(node->details->allocated_rsc);
+ free(node->details);
+ free(node);
+ }
+ if (nodes != NULL) {
+ g_list_free(nodes);
+ }
+}
+
+static void
+pe__free_ordering(GList *constraints)
+{
+ GList *iterator = constraints;
+
+ while (iterator != NULL) {
+ pe__ordering_t *order = iterator->data;
+
+ iterator = iterator->next;
+
+ free(order->lh_action_task);
+ free(order->rh_action_task);
+ free(order);
+ }
+ if (constraints != NULL) {
+ g_list_free(constraints);
+ }
+}
+
+static void
+pe__free_location(GList *constraints)
+{
+ GList *iterator = constraints;
+
+ while (iterator != NULL) {
+ pe__location_t *cons = iterator->data;
+
+ iterator = iterator->next;
+
+ g_list_free_full(cons->node_list_rh, free);
+ free(cons->id);
+ free(cons);
+ }
+ if (constraints != NULL) {
+ g_list_free(constraints);
+ }
+}
+
+/*!
+ * \brief Reset working set to default state without freeing it or constraints
+ *
+ * \param[in,out] data_set Working set to reset
+ *
+ * \deprecated This function is deprecated as part of the API;
+ * pe_reset_working_set() should be used instead.
+ */
+void
+cleanup_calculations(pe_working_set_t * data_set)
+{
+ if (data_set == NULL) {
+ return;
+ }
+
+ pe__clear_working_set_flags(data_set, pe_flag_have_status);
+ if (data_set->config_hash != NULL) {
+ g_hash_table_destroy(data_set->config_hash);
+ }
+
+ if (data_set->singletons != NULL) {
+ g_hash_table_destroy(data_set->singletons);
+ }
+
+ if (data_set->tickets) {
+ g_hash_table_destroy(data_set->tickets);
+ }
+
+ if (data_set->template_rsc_sets) {
+ g_hash_table_destroy(data_set->template_rsc_sets);
+ }
+
+ if (data_set->tags) {
+ g_hash_table_destroy(data_set->tags);
+ }
+
+ free(data_set->dc_uuid);
+
+ crm_trace("deleting resources");
+ pe_free_resources(data_set->resources);
+
+ crm_trace("deleting actions");
+ pe_free_actions(data_set->actions);
+
+ crm_trace("deleting nodes");
+ pe_free_nodes(data_set->nodes);
+
+ pe__free_param_checks(data_set);
+ g_list_free(data_set->stop_needed);
+ free_xml(data_set->graph);
+ crm_time_free(data_set->now);
+ free_xml(data_set->input);
+ free_xml(data_set->failed);
+
+ set_working_set_defaults(data_set);
+
+    CRM_CHECK(data_set->ordering_constraints == NULL, ;);
+    CRM_CHECK(data_set->placement_constraints == NULL, ;);
+}
+
+/*!
+ * \brief Reset a working set to default state without freeing it
+ *
+ * \param[in,out] data_set Working set to reset
+ */
+void
+pe_reset_working_set(pe_working_set_t *data_set)
+{
+ if (data_set == NULL) {
+ return;
+ }
+
+ crm_trace("Deleting %d ordering constraints",
+ g_list_length(data_set->ordering_constraints));
+ pe__free_ordering(data_set->ordering_constraints);
+ data_set->ordering_constraints = NULL;
+
+ crm_trace("Deleting %d location constraints",
+ g_list_length(data_set->placement_constraints));
+ pe__free_location(data_set->placement_constraints);
+ data_set->placement_constraints = NULL;
+
+ crm_trace("Deleting %d colocation constraints",
+ g_list_length(data_set->colocation_constraints));
+ g_list_free_full(data_set->colocation_constraints, free);
+ data_set->colocation_constraints = NULL;
+
+ crm_trace("Deleting %d ticket constraints",
+ g_list_length(data_set->ticket_constraints));
+ g_list_free_full(data_set->ticket_constraints, free);
+ data_set->ticket_constraints = NULL;
+
+ cleanup_calculations(data_set);
+}
+
+void
+set_working_set_defaults(pe_working_set_t * data_set)
+{
+ void *priv = data_set->priv;
+
+ memset(data_set, 0, sizeof(pe_working_set_t));
+
+ data_set->priv = priv;
+ data_set->order_id = 1;
+ data_set->action_id = 1;
+ data_set->no_quorum_policy = no_quorum_stop;
+
+ data_set->flags = 0x0ULL;
+
+ pe__set_working_set_flags(data_set,
+ pe_flag_stop_rsc_orphans
+ |pe_flag_symmetric_cluster
+ |pe_flag_stop_action_orphans);
+ if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
+ pe__set_working_set_flags(data_set, pe_flag_concurrent_fencing);
+ }
+}
+
+pe_resource_t *
+pe_find_resource(GList *rsc_list, const char *id)
+{
+ return pe_find_resource_with_flags(rsc_list, id, pe_find_renamed);
+}
+
+pe_resource_t *
+pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
+{
+ GList *rIter = NULL;
+
+ for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
+ pe_resource_t *parent = rIter->data;
+
+ pe_resource_t *match =
+ parent->fns->find_rsc(parent, id, NULL, flags);
+ if (match != NULL) {
+ return match;
+ }
+ }
+ crm_trace("No match for %s", id);
+ return NULL;
+}
+
+/*!
+ * \brief Find a node by name or ID in a list of nodes
+ *
+ * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] id If not NULL, ID of node to find
+ * \param[in] uname If not NULL, name of node to find
+ *
+ * \return Node from \p nodes that matches \p id if any,
+ * otherwise node from \p nodes that matches \p uname if any,
+ * otherwise NULL
+ */
+pe_node_t *
+pe_find_node_any(const GList *nodes, const char *id, const char *uname)
+{
+ pe_node_t *match = NULL;
+
+ if (id != NULL) {
+ match = pe_find_node_id(nodes, id);
+ }
+ if ((match == NULL) && (uname != NULL)) {
+ match = pe_find_node(nodes, uname);
+ }
+ return match;
+}
+
+/*!
+ * \brief Find a node by ID in a list of nodes
+ *
+ * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] id ID of node to find
+ *
+ * \return Node from \p nodes that matches \p id if any, otherwise NULL
+ */
+pe_node_t *
+pe_find_node_id(const GList *nodes, const char *id)
+{
+ for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
+
+ /* @TODO Whether node IDs should be considered case-sensitive should
+ * probably depend on the node type, so functionizing the comparison
+ * would be worthwhile
+ */
+ if (pcmk__str_eq(node->details->id, id, pcmk__str_casei)) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \brief Find a node by name in a list of nodes
+ *
+ * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] node_name Name of node to find
+ *
+ * \return Node from \p nodes that matches \p node_name if any, otherwise NULL
+ */
+pe_node_t *
+pe_find_node(const GList *nodes, const char *node_name)
+{
+ for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
+
+ if (pcmk__str_eq(node->details->uname, node_name, pcmk__str_casei)) {
+ return node;
+ }
+ }
+ return NULL;
+}
diff --git a/lib/pengine/tags.c b/lib/pengine/tags.c
new file mode 100644
index 0000000..81c27e4
--- /dev/null
+++ b/lib/pengine/tags.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2020-2021 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+#include <stdbool.h>
+
+#include <crm/common/util.h>
+#include <crm/pengine/internal.h>
+#include <crm/pengine/pe_types.h>
+
+GList *
+pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
+{
+ gpointer value;
+ GList *retval = NULL;
+
+ if (data_set->tags == NULL) {
+ return retval;
+ }
+
+ value = g_hash_table_lookup(data_set->tags, tag_name);
+
+ if (value == NULL) {
+ return retval;
+ }
+
+ for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ const char *id = (const char *) refs->data;
+ pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, id,
+ pe_find_renamed|pe_find_any);
+
+ if (!rsc) {
+ continue;
+ }
+
+ retval = g_list_append(retval, strdup(rsc_printable_id(rsc)));
+ }
+
+ return retval;
+}
+
+GList *
+pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
+{
+ gpointer value;
+ GList *retval = NULL;
+
+ if (data_set->tags == NULL) {
+ return retval;
+ }
+
+ value = g_hash_table_lookup(data_set->tags, tag_name);
+
+ if (value == NULL) {
+ return retval;
+ }
+
+ /* Iterate over the list of node IDs. */
+ for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ /* Find the node that has this ID. */
+ const char *id = (const char *) refs->data;
+ pe_node_t *node = pe_find_node_id(data_set->nodes, id);
+
+ if (!node) {
+ continue;
+ }
+
+ /* Get the uname for the node and add it to the return list. */
+ retval = g_list_append(retval, strdup(node->details->uname));
+ }
+
+ return retval;
+}
+
+bool
+pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *tag_name)
+{
+ GList *rscs = pe__rscs_with_tag(data_set, tag_name);
+ bool retval = false;
+
+ if (rscs == NULL) {
+ return retval;
+ }
+
+ retval = g_list_find_custom(rscs, rsc_name, (GCompareFunc) strcmp) != NULL;
+ g_list_free_full(rscs, free);
+ return retval;
+}
+
+bool
+pe__uname_has_tag(pe_working_set_t *data_set, const char *node_name, const char *tag_name)
+{
+ GList *unames = pe__unames_with_tag(data_set, tag_name);
+ bool retval = false;
+
+ if (unames == NULL) {
+ return retval;
+ }
+
+ retval = g_list_find_custom(unames, node_name, (GCompareFunc) strcmp) != NULL;
+ g_list_free_full(unames, free);
+ return retval;
+}
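+
+/* A minimal sketch (hypothetical tag name and working set): listing and
+ * freeing the resources referenced by a tag:
+ *
+ *     GList *rscs = pe__rscs_with_tag(data_set, "my-tag");
+ *
+ *     for (GList *iter = rscs; iter != NULL; iter = iter->next) {
+ *         crm_info("Tagged resource: %s", (const char *) iter->data);
+ *     }
+ *     g_list_free_full(rscs, free); // entries are strdup'd copies
+ */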
diff --git a/lib/pengine/tests/Makefile.am b/lib/pengine/tests/Makefile.am
new file mode 100644
index 0000000..4986ef2
--- /dev/null
+++ b/lib/pengine/tests/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = rules native status unpack utils
diff --git a/lib/pengine/tests/native/Makefile.am b/lib/pengine/tests/native/Makefile.am
new file mode 100644
index 0000000..5046ff1
--- /dev/null
+++ b/lib/pengine/tests/native/Makefile.am
@@ -0,0 +1,22 @@
+#
+# Copyright 2022 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+include $(top_srcdir)/mk/tap.mk
+include $(top_srcdir)/mk/unittest.mk
+
+AM_CPPFLAGS += -I$(top_srcdir)
+LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
+
+AM_TESTS_ENVIRONMENT += PCMK_CTS_CLI_DIR=$(top_srcdir)/cts/cli
+
+# Add "_test" to the end of all test program names to simplify .gitignore.
+check_PROGRAMS = native_find_rsc_test \
+ pe_base_name_eq_test
+
+TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
new file mode 100644
index 0000000..22aaf41
--- /dev/null
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/common/xml.h>
+#include <crm/pengine/internal.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/pe_types.h>
+
+/* Needed to access replicas inside a bundle. */
+#define PE__VARIANT_BUNDLE 1
+#include <lib/pengine/variant.h>
+
+xmlNode *input = NULL;
+pe_working_set_t *data_set = NULL;
+
+pe_node_t *cluster01, *cluster02, *httpd_bundle_0;
+pe_resource_t *exim_group, *inactive_group, *promotable_clone, *inactive_clone;
+pe_resource_t *httpd_bundle, *mysql_clone_group;
+
+static int
+setup(void **state) {
+ char *path = NULL;
+
+ crm_xml_init();
+
+ path = crm_strdup_printf("%s/crm_mon.xml", getenv("PCMK_CTS_CLI_DIR"));
+ input = filename2xml(path);
+ free(path);
+
+ if (input == NULL) {
+ return 1;
+ }
+
+ data_set = pe_new_working_set();
+
+ if (data_set == NULL) {
+ return 1;
+ }
+
+ pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
+ data_set->input = input;
+
+ cluster_status(data_set);
+
+ /* Get references to the cluster nodes so we don't have to find them repeatedly. */
+ cluster01 = pe_find_node(data_set->nodes, "cluster01");
+ cluster02 = pe_find_node(data_set->nodes, "cluster02");
+ httpd_bundle_0 = pe_find_node(data_set->nodes, "httpd-bundle-0");
+
+ /* Get references to several resources we use frequently. */
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "exim-group") == 0) {
+ exim_group = rsc;
+ } else if (strcmp(rsc->id, "httpd-bundle") == 0) {
+ httpd_bundle = rsc;
+ } else if (strcmp(rsc->id, "inactive-clone") == 0) {
+ inactive_clone = rsc;
+ } else if (strcmp(rsc->id, "inactive-group") == 0) {
+ inactive_group = rsc;
+ } else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
+ mysql_clone_group = rsc;
+ } else if (strcmp(rsc->id, "promotable-clone") == 0) {
+ promotable_clone = rsc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+teardown(void **state) {
+ pe_free_working_set(data_set);
+
+ return 0;
+}
+
+static void
+bad_args(void **state) {
+ pe_resource_t *rsc = (pe_resource_t *) g_list_first(data_set->resources)->data;
+ char *id = rsc->id;
+ char *name = NULL;
+
+ assert_non_null(rsc);
+
+ assert_null(native_find_rsc(NULL, "dummy", NULL, 0));
+ assert_null(native_find_rsc(rsc, NULL, NULL, 0));
+
+ /* No resources exist with these names. */
+ name = crm_strdup_printf("%sX", rsc->id);
+ assert_null(native_find_rsc(rsc, name, NULL, 0));
+ free(name);
+
+ name = crm_strdup_printf("x%s", rsc->id);
+ assert_null(native_find_rsc(rsc, name, NULL, 0));
+ free(name);
+
+ name = g_ascii_strup(rsc->id, -1);
+ assert_null(native_find_rsc(rsc, name, NULL, 0));
+ g_free(name);
+
+ /* Fails because resource ID is NULL. */
+ rsc->id = NULL;
+ assert_null(native_find_rsc(rsc, id, NULL, 0));
+ rsc->id = id;
+}
+
+static void
+primitive_rsc(void **state) {
+ pe_resource_t *dummy = NULL;
+
+ /* Find the "dummy" resource, which is the only one with that ID in the set. */
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "dummy") == 0) {
+ dummy = rsc;
+ break;
+ }
+ }
+
+ assert_non_null(dummy);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0));
+ assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, pe_find_current));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(dummy, "dummy", NULL, pe_find_clone));
+ assert_null(native_find_rsc(dummy, "dummy", cluster02, pe_find_clone));
+
+ /* Fails because dummy is not running on cluster01, even with the right flags. */
+ assert_null(native_find_rsc(dummy, "dummy", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(dummy, "dummy", cluster02, 0));
+
+ /* Passes because dummy is running on cluster02. */
+ assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", cluster02, pe_find_current));
+}
+
+static void
+group_rsc(void **state) {
+ assert_non_null(exim_group);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0));
+ assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, pe_find_current));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(exim_group, "exim-group", NULL, pe_find_clone));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_clone));
+
+ /* Fails because none of exim-group's children are running on cluster01, even with the right flags. */
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0));
+
+ /* Passes because one of exim-group's children is running on cluster02. */
+ assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", cluster02, pe_find_current));
+}
+
+static void
+inactive_group_rsc(void **state) {
+ assert_non_null(inactive_group);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0));
+ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_current));
+ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_inactive));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_clone));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_clone));
+
+ /* Fails because none of inactive-group's children are running. */
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_current));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_current));
+
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_inactive));
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_inactive));
+}
+
+static void
+group_member_rsc(void **state) {
+ pe_resource_t *public_ip = NULL;
+
+ /* Find the "Public-IP" resource, a member of "exim-group". */
+ for (GList *iter = exim_group->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "Public-IP") == 0) {
+ public_ip = rsc;
+ break;
+ }
+ }
+
+ assert_non_null(public_ip);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0));
+ assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, pe_find_current));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(public_ip, "Public-IP", NULL, pe_find_clone));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_clone));
+
+ /* Fails because Public-IP is not running on cluster01, even with the right flags. */
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0));
+
+ /* Passes because Public-IP is running on cluster02. */
+ assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_current));
+}
+
+static void
+inactive_group_member_rsc(void **state) {
+ pe_resource_t *inactive_dummy_1 = NULL;
+
+ /* Find the "inactive-dummy-1" resource, a member of "inactive-group". */
+ for (GList *iter = inactive_group->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "inactive-dummy-1") == 0) {
+ inactive_dummy_1 = rsc;
+ break;
+ }
+ }
+
+ assert_non_null(inactive_dummy_1);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0));
+ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_current));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_clone));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_clone));
+
+ /* Fails because inactive-dummy-1 is not running. */
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_current));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_current));
+
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_inactive));
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_inactive));
+}
+
+static void
+clone_rsc(void **state) {
+ assert_non_null(promotable_clone);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0));
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_current));
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_clone));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0));
+
+    /* Passes because one of promotable-clone's children is running on cluster01. */
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0));
+
+    /* Passes because one of promotable-clone's children is running on cluster02. */
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_current));
+
+ /* Passes for previous reasons, plus includes pe_find_clone check. */
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_clone|pe_find_current));
+ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_clone|pe_find_current));
+}
+
+static void
+inactive_clone_rsc(void **state) {
+ assert_non_null(inactive_clone);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0));
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_current));
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_clone));
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_inactive));
+
+ /* Fails because none of inactive-clone's children are running. */
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_current|pe_find_clone));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_current|pe_find_clone));
+
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_inactive));
+ /* Passes because of flags. */
+ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_inactive));
+}
+
+static void
+clone_instance_rsc(void **state) {
+ pe_resource_t *promotable_0 = NULL;
+ pe_resource_t *promotable_1 = NULL;
+
+ /* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
+ for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
+ promotable_0 = rsc;
+ } else if (strcmp(rsc->id, "promotable-rsc:1") == 0) {
+ promotable_1 = rsc;
+ }
+ }
+
+ assert_non_null(promotable_0);
+ assert_non_null(promotable_1);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0));
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, pe_find_current));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, pe_find_current));
+
+ /* Passes because NULL was passed for node and primitive name was given, with correct flags. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_clone));
+
+ /* Passes because pe_find_any matches any instance's base name. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_any));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_any));
+
+ /* Passes because pe_find_anon matches. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_anon));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_anon));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+
+ /* Fails because incorrect flags were given along with primitive name. */
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_current));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_current));
+
+    /* Now repeat the checks, both failing and passing cases, except passing
+     * promotable_clone instead of promotable_X as the first argument to
+     * native_find_rsc.
+     */
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0));
+ assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, pe_find_current));
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+}
+
+static void
+renamed_rsc(void **state) {
+ pe_resource_t *promotable_0 = NULL;
+ pe_resource_t *promotable_1 = NULL;
+
+ /* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
+ for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
+ promotable_0 = rsc;
+ } else if (strcmp(rsc->id, "promotable-rsc:1") == 0) {
+ promotable_1 = rsc;
+ }
+ }
+
+ assert_non_null(promotable_0);
+ assert_non_null(promotable_1);
+
+ /* Passes because pe_find_renamed means the base name matches clone_name. */
+ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_renamed));
+ assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_renamed));
+}
+
+static void
+bundle_rsc(void **state) {
+ assert_non_null(httpd_bundle);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0));
+ assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_current));
+
+ /* Fails because resource is not a clone (nor cloned). */
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_clone));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_clone));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0));
+
+ /* Passes because one of httpd_bundle's children is running on cluster01. */
+ assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_current));
+}
+
+static void
+bundle_replica_rsc(void **state) {
+ pe__bundle_variant_data_t *bundle_data = NULL;
+ pe__bundle_replica_t *replica_0 = NULL;
+
+ pe_resource_t *ip_0 = NULL;
+ pe_resource_t *child_0 = NULL;
+ pe_resource_t *container_0 = NULL;
+ pe_resource_t *remote_0 = NULL;
+
+ get_bundle_variant_data(bundle_data, httpd_bundle);
+ replica_0 = (pe__bundle_replica_t *) bundle_data->replicas->data;
+
+ ip_0 = replica_0->ip;
+ child_0 = replica_0->child;
+ container_0 = replica_0->container;
+ remote_0 = replica_0->remote;
+
+ assert_non_null(ip_0);
+ assert_non_null(child_0);
+ assert_non_null(container_0);
+ assert_non_null(remote_0);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", NULL, 0));
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", NULL, 0));
+ assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0));
+ assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
+ assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster02, pe_find_current));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", httpd_bundle_0, pe_find_current));
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", httpd_bundle_0, pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster02, pe_find_current));
+ assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02, pe_find_current));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", httpd_bundle_0, pe_find_current));
+ assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, pe_find_current));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, pe_find_current));
+
+ /* Passes because pe_find_any matches any replica's base name. */
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_any));
+
+ /* Passes because pe_find_anon matches. */
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_anon));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_any|pe_find_current));
+ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_anon|pe_find_current));
+
+ /* Fails because incorrect flags were given along with base name. */
+ assert_null(native_find_rsc(child_0, "httpd", NULL, pe_find_current));
+
+    /* Now repeat the checks, both failing and passing cases, except passing
+     * httpd_bundle instead of X_0 as the first argument to native_find_rsc.
+     */
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
+ assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(ip_0, native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
+ assert_ptr_equal(child_0, native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, pe_find_current));
+ assert_ptr_equal(container_0, native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, pe_find_current));
+ assert_ptr_equal(remote_0, native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, pe_find_current));
+}
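+
+/* As the lookups above illustrate, each bundle replica ties together four
+ * resources: an IP address, the containerized child ("httpd:0"), the
+ * container itself, and the Pacemaker Remote connection. The child runs on
+ * the bundle node (httpd-bundle-0), while the other three run on the
+ * cluster node that hosts the replica.
+ */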
+
+static void
+clone_group_rsc(void **state) {
+ assert_non_null(mysql_clone_group);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0));
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_current));
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_clone));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0));
+
+ /* Passes because one of mysql-clone-group's children is running on cluster01. */
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0));
+
+ /* Passes because one of mysql-clone-group's children is running on cluster02. */
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_current));
+
+ /* Passes for previous reasons, plus includes pe_find_clone check. */
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_clone|pe_find_current));
+}
+
+static void
+clone_group_instance_rsc(void **state) {
+ pe_resource_t *mysql_group_0 = NULL;
+ pe_resource_t *mysql_group_1 = NULL;
+
+ /* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */
+ for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "mysql-group:0") == 0) {
+ mysql_group_0 = rsc;
+ } else if (strcmp(rsc->id, "mysql-group:1") == 0) {
+ mysql_group_1 = rsc;
+ }
+ }
+
+ assert_non_null(mysql_group_0);
+ assert_non_null(mysql_group_1);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0));
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, pe_find_current));
+
+ /* Passes because NULL was passed for node and base name was given, with correct flags. */
+    assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_clone));
+
+ /* Passes because pe_find_any matches any base name. */
+    assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_any));
+    assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_any));
+
+ /* Passes because pe_find_anon matches. */
+    assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_anon));
+    assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_anon));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_any|pe_find_current));
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_any|pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_any|pe_find_current));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+
+ /* Fails because incorrect flags were given along with base name. */
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_current));
+
+    /* Now repeat the checks, both failing and passing cases, except passing
+     * mysql_clone_group instead of mysql_group_X as the first argument to
+     * native_find_rsc.
+     */
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0));
+ assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0));
+
+ /* Check that the resource is running on the node we expect. */
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_any|pe_find_current));
+ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_any|pe_find_current));
+ assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+}
+
+static void
+clone_group_member_rsc(void **state) {
+ pe_resource_t *mysql_proxy = NULL;
+
+ /* Find the "mysql-proxy" resource, a member of "mysql-group". */
+ for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "mysql-group:0") == 0) {
+ for (GList *iter2 = rsc->children; iter2 != NULL; iter2 = iter2->next) {
+ pe_resource_t *child = (pe_resource_t *) iter2->data;
+
+ if (strcmp(child->id, "mysql-proxy:0") == 0) {
+ mysql_proxy = child;
+ break;
+ }
+ }
+
+ break;
+ }
+ }
+
+ assert_non_null(mysql_proxy);
+
+ /* Passes because NULL was passed for node, regardless of flags. */
+ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0));
+ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_current));
+
+ /* Passes because resource's parent is a clone. */
+ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_clone));
+ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_clone|pe_find_current));
+
+ /* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */
+ assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, pe_find_current));
+
+ /* Fails because pe_find_current is required if a node is given. */
+ assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0));
+
+ /* Passes because mysql-proxy:0 is running on cluster02. */
+ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_current));
+}
+
+/* TODO: Add tests for finding on an allocated node (passing a node without
+ * pe_find_current, after scheduling, for a resource that is
+ * starting/stopping/moving).
+ */
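+
+/* A quick recap of the flag semantics exercised above, as demonstrated by
+ * these tests (not an exhaustive specification of native_find_rsc):
+ *
+ * - pe_find_current: required whenever a node is given; matches only where
+ *   the resource is actually running
+ * - pe_find_inactive: additionally match resources running nowhere
+ * - pe_find_clone: match a clone instance by its base name
+ * - pe_find_any / pe_find_anon: match any instance by its base name
+ * - pe_find_renamed: match when the base name equals the clone_name
+ */
+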
+PCMK__UNIT_TEST(setup, teardown,
+ cmocka_unit_test(bad_args),
+ cmocka_unit_test(primitive_rsc),
+ cmocka_unit_test(group_rsc),
+ cmocka_unit_test(inactive_group_rsc),
+ cmocka_unit_test(group_member_rsc),
+ cmocka_unit_test(inactive_group_member_rsc),
+ cmocka_unit_test(clone_rsc),
+ cmocka_unit_test(inactive_clone_rsc),
+ cmocka_unit_test(clone_instance_rsc),
+ cmocka_unit_test(renamed_rsc),
+ cmocka_unit_test(bundle_rsc),
+ cmocka_unit_test(bundle_replica_rsc),
+ cmocka_unit_test(clone_group_rsc),
+ cmocka_unit_test(clone_group_instance_rsc),
+ cmocka_unit_test(clone_group_member_rsc))
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
new file mode 100644
index 0000000..67a62f8
--- /dev/null
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+#include <crm/common/xml.h>
+#include <crm/pengine/internal.h>
+#include <crm/pengine/status.h>
+#include <crm/pengine/pe_types.h>
+
+xmlNode *input = NULL;
+pe_working_set_t *data_set = NULL;
+
+pe_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
+pe_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
+
+static int
+setup(void **state) {
+ char *path = NULL;
+
+ crm_xml_init();
+
+ path = crm_strdup_printf("%s/crm_mon.xml", getenv("PCMK_CTS_CLI_DIR"));
+ input = filename2xml(path);
+ free(path);
+
+ if (input == NULL) {
+ return 1;
+ }
+
+ data_set = pe_new_working_set();
+
+ if (data_set == NULL) {
+ return 1;
+ }
+
+ pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
+ data_set->input = input;
+
+ cluster_status(data_set);
+
+ /* Get references to several resources we use frequently. */
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (strcmp(rsc->id, "dummy") == 0) {
+ dummy = rsc;
+ } else if (strcmp(rsc->id, "exim-group") == 0) {
+ exim_group = rsc;
+ } else if (strcmp(rsc->id, "httpd-bundle") == 0) {
+ httpd_bundle = rsc;
+ } else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *child = (pe_resource_t *) iter->data;
+
+ if (strcmp(child->id, "mysql-group:0") == 0) {
+ mysql_group_0 = child;
+ } else if (strcmp(child->id, "mysql-group:1") == 0) {
+ mysql_group_1 = child;
+ }
+ }
+ } else if (strcmp(rsc->id, "promotable-clone") == 0) {
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *child = (pe_resource_t *) iter->data;
+
+ if (strcmp(child->id, "promotable-rsc:0") == 0) {
+ promotable_0 = child;
+ } else if (strcmp(child->id, "promotable-rsc:1") == 0) {
+ promotable_1 = child;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+teardown(void **state) {
+ pe_free_working_set(data_set);
+
+ return 0;
+}
+
+static void
+bad_args(void **state) {
+ char *id = dummy->id;
+
+ assert_false(pe_base_name_eq(NULL, "dummy"));
+ assert_false(pe_base_name_eq(dummy, NULL));
+
+ dummy->id = NULL;
+ assert_false(pe_base_name_eq(dummy, "dummy"));
+ dummy->id = id;
+}
+
+static void
+primitive_rsc(void **state) {
+ assert_true(pe_base_name_eq(dummy, "dummy"));
+ assert_false(pe_base_name_eq(dummy, "DUMMY"));
+ assert_false(pe_base_name_eq(dummy, "dUmMy"));
+ assert_false(pe_base_name_eq(dummy, "dummy0"));
+ assert_false(pe_base_name_eq(dummy, "dummy:0"));
+}
+
+static void
+group_rsc(void **state) {
+ assert_true(pe_base_name_eq(exim_group, "exim-group"));
+ assert_false(pe_base_name_eq(exim_group, "EXIM-GROUP"));
+ assert_false(pe_base_name_eq(exim_group, "exim-group0"));
+ assert_false(pe_base_name_eq(exim_group, "exim-group:0"));
+ assert_false(pe_base_name_eq(exim_group, "Public-IP"));
+}
+
+static void
+clone_rsc(void **state) {
+ assert_true(pe_base_name_eq(promotable_0, "promotable-rsc"));
+ assert_true(pe_base_name_eq(promotable_1, "promotable-rsc"));
+
+ assert_false(pe_base_name_eq(promotable_0, "promotable-rsc:0"));
+ assert_false(pe_base_name_eq(promotable_1, "promotable-rsc:1"));
+ assert_false(pe_base_name_eq(promotable_0, "PROMOTABLE-RSC"));
+ assert_false(pe_base_name_eq(promotable_1, "PROMOTABLE-RSC"));
+ assert_false(pe_base_name_eq(promotable_0, "Promotable-rsc"));
+ assert_false(pe_base_name_eq(promotable_1, "Promotable-rsc"));
+}
+
+static void
+bundle_rsc(void **state) {
+ assert_true(pe_base_name_eq(httpd_bundle, "httpd-bundle"));
+ assert_false(pe_base_name_eq(httpd_bundle, "HTTPD-BUNDLE"));
+ assert_false(pe_base_name_eq(httpd_bundle, "httpd"));
+ assert_false(pe_base_name_eq(httpd_bundle, "httpd-docker-0"));
+}
+
+PCMK__UNIT_TEST(setup, teardown,
+ cmocka_unit_test(bad_args),
+ cmocka_unit_test(primitive_rsc),
+ cmocka_unit_test(group_rsc),
+ cmocka_unit_test(clone_rsc),
+ cmocka_unit_test(bundle_rsc))
diff --git a/lib/pengine/tests/rules/Makefile.am b/lib/pengine/tests/rules/Makefile.am
new file mode 100644
index 0000000..261ec16
--- /dev/null
+++ b/lib/pengine/tests/rules/Makefile.am
@@ -0,0 +1,18 @@
+#
+# Copyright 2020-2021 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+include $(top_srcdir)/mk/tap.mk
+include $(top_srcdir)/mk/unittest.mk
+
+LDADD += $(top_builddir)/lib/pengine/libpe_rules_test.la
+
+# Add "_test" to the end of all test program names to simplify .gitignore.
+check_PROGRAMS = pe_cron_range_satisfied_test
+
+TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/rules/pe_cron_range_satisfied_test.c b/lib/pengine/tests/rules/pe_cron_range_satisfied_test.c
new file mode 100644
index 0000000..a8ba6cf
--- /dev/null
+++ b/lib/pengine/tests/rules/pe_cron_range_satisfied_test.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2020-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+
+#include <crm/msg_xml.h>
+#include <crm/common/unittest_internal.h>
+#include <crm/common/xml.h>
+#include <crm/pengine/rules_internal.h>
+
+static void
+run_one_test(const char *t, const char *x, int expected) {
+ crm_time_t *tm = crm_time_new(t);
+ xmlNodePtr xml = string2xml(x);
+
+ assert_int_equal(pe_cron_range_satisfied(tm, xml), expected);
+
+ crm_time_free(tm);
+ free_xml(xml);
+}
+
+static void
+no_time_given(void **state) {
+ assert_int_equal(pe_cron_range_satisfied(NULL, NULL), pcmk_rc_op_unsatisfied);
+}
+
+static void
+any_time_satisfies_empty_spec(void **state) {
+ crm_time_t *tm = crm_time_new(NULL);
+
+ assert_int_equal(pe_cron_range_satisfied(tm, NULL), pcmk_rc_ok);
+
+ crm_time_free(tm);
+}
+
+static void
+time_satisfies_year_spec(void **state) {
+ run_one_test("2020-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2020'/>",
+ pcmk_rc_ok);
+}
+
+static void
+time_after_year_spec(void **state) {
+ run_one_test("2020-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2019'/>",
+ pcmk_rc_after_range);
+}
+
+static void
+time_satisfies_year_range(void **state) {
+ run_one_test("2020-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2010-2030'/>",
+ pcmk_rc_ok);
+}
+
+static void
+time_before_year_range(void **state) {
+ run_one_test("2000-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2010-2030'/>",
+ pcmk_rc_before_range);
+}
+
+static void
+time_after_year_range(void **state) {
+ run_one_test("2020-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2010-2015'/>",
+ pcmk_rc_after_range);
+}
+
+static void
+range_without_start_year_passes(void **state) {
+ run_one_test("2010-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='-2020'/>",
+ pcmk_rc_ok);
+}
+
+static void
+range_without_end_year_passes(void **state) {
+ run_one_test("2010-01-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2000-'/>",
+ pcmk_rc_ok);
+ run_one_test("2000-10-01",
+ "<date_spec " XML_ATTR_ID "='spec' years='2000-'/>",
+ pcmk_rc_ok);
+}
+
+static void
+yeardays_satisfies(void **state) {
+ run_one_test("2020-01-30",
+ "<date_spec " XML_ATTR_ID "='spec' yeardays='30'/>",
+ pcmk_rc_ok);
+}
+
+static void
+time_after_yeardays_spec(void **state) {
+ run_one_test("2020-02-15",
+ "<date_spec " XML_ATTR_ID "='spec' yeardays='40'/>",
+ pcmk_rc_after_range);
+}
+
+static void
+yeardays_feb_29_satisfies(void **state) {
+ run_one_test("2016-02-29",
+ "<date_spec " XML_ATTR_ID "='spec' yeardays='60'/>",
+ pcmk_rc_ok);
+}
+
+static void
+exact_ymd_satisfies(void **state) {
+ run_one_test("2001-12-31",
+ "<date_spec " XML_ATTR_ID "='spec' years='2001' months='12' "
+ "monthdays='31'/>",
+ pcmk_rc_ok);
+}
+
+static void
+range_in_month_satisfies(void **state) {
+ run_one_test("2001-06-10",
+ "<date_spec " XML_ATTR_ID "='spec' years='2001' months='6' "
+ "monthdays='1-10'/>",
+ pcmk_rc_ok);
+}
+
+static void
+exact_ymd_after_range(void **state) {
+ run_one_test("2001-12-31",
+ "<date_spec " XML_ATTR_ID "='spec' years='2001' months='12' "
+ "monthdays='30'/>",
+ pcmk_rc_after_range);
+}
+
+static void
+time_before_monthdays_range(void **state) {
+ run_one_test("2001-06-10",
+ "<date_spec " XML_ATTR_ID "='spec' years='2001' months='6' "
+ "monthdays='11-15'/>",
+ pcmk_rc_before_range);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(no_time_given),
+ cmocka_unit_test(any_time_satisfies_empty_spec),
+ cmocka_unit_test(time_satisfies_year_spec),
+ cmocka_unit_test(time_after_year_spec),
+ cmocka_unit_test(time_satisfies_year_range),
+ cmocka_unit_test(time_before_year_range),
+ cmocka_unit_test(time_after_year_range),
+ cmocka_unit_test(range_without_start_year_passes),
+ cmocka_unit_test(range_without_end_year_passes),
+ cmocka_unit_test(yeardays_satisfies),
+ cmocka_unit_test(time_after_yeardays_spec),
+ cmocka_unit_test(yeardays_feb_29_satisfies),
+ cmocka_unit_test(exact_ymd_satisfies),
+ cmocka_unit_test(range_in_month_satisfies),
+ cmocka_unit_test(exact_ymd_after_range),
+    cmocka_unit_test(time_before_monthdays_range))
diff --git a/lib/pengine/tests/status/Makefile.am b/lib/pengine/tests/status/Makefile.am
new file mode 100644
index 0000000..3f95496
--- /dev/null
+++ b/lib/pengine/tests/status/Makefile.am
@@ -0,0 +1,22 @@
+#
+# Copyright 2022 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+include $(top_srcdir)/mk/tap.mk
+include $(top_srcdir)/mk/unittest.mk
+
+LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
+
+# Add "_test" to the end of all test program names to simplify .gitignore.
+check_PROGRAMS = pe_find_node_any_test \
+ pe_find_node_id_test \
+ pe_find_node_test \
+ pe_new_working_set_test \
+ set_working_set_defaults_test
+
+TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/pe_find_node_any_test.c b/lib/pengine/tests/status/pe_find_node_any_test.c
new file mode 100644
index 0000000..b911424
--- /dev/null
+++ b/lib/pengine/tests/status/pe_find_node_any_test.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+static void
+empty_list(void **state) {
+ assert_null(pe_find_node_any(NULL, NULL, NULL));
+ assert_null(pe_find_node_any(NULL, NULL, "cluster1"));
+ assert_null(pe_find_node_any(NULL, "id1", NULL));
+ assert_null(pe_find_node_any(NULL, "id1", "cluster1"));
+}
+
+static void
+non_null_list(void **state) {
+ GList *nodes = NULL;
+
+ pe_node_t *a = calloc(1, sizeof(pe_node_t));
+ pe_node_t *b = calloc(1, sizeof(pe_node_t));
+
+ a->details = calloc(1, sizeof(struct pe_node_shared_s));
+ a->details->uname = "cluster1";
+ a->details->id = "id1";
+ b->details = calloc(1, sizeof(struct pe_node_shared_s));
+ b->details->uname = "cluster2";
+ b->details->id = "id2";
+
+ nodes = g_list_append(nodes, a);
+ nodes = g_list_append(nodes, b);
+
+ assert_ptr_equal(b, pe_find_node_any(nodes, "id2", NULL));
+ assert_ptr_equal(b, pe_find_node_any(nodes, "ID2", NULL));
+
+ assert_ptr_equal(a, pe_find_node_any(nodes, "xyz", "cluster1"));
+ assert_ptr_equal(a, pe_find_node_any(nodes, NULL, "cluster1"));
+
+ assert_null(pe_find_node_any(nodes, "id10", NULL));
+ assert_null(pe_find_node_any(nodes, "nodeid1", NULL));
+ assert_null(pe_find_node_any(nodes, NULL, "cluster10"));
+ assert_null(pe_find_node_any(nodes, NULL, "nodecluster1"));
+ assert_null(pe_find_node_any(nodes, "id3", "cluster3"));
+ assert_null(pe_find_node_any(nodes, NULL, NULL));
+
+ free(a->details);
+ free(a);
+ free(b->details);
+ free(b);
+ g_list_free(nodes);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(empty_list),
+ cmocka_unit_test(non_null_list))
diff --git a/lib/pengine/tests/status/pe_find_node_id_test.c b/lib/pengine/tests/status/pe_find_node_id_test.c
new file mode 100644
index 0000000..832a40a
--- /dev/null
+++ b/lib/pengine/tests/status/pe_find_node_id_test.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+static void
+empty_list(void **state) {
+ assert_null(pe_find_node_id(NULL, NULL));
+ assert_null(pe_find_node_id(NULL, "id1"));
+}
+
+static void
+non_null_list(void **state) {
+ GList *nodes = NULL;
+
+ pe_node_t *a = calloc(1, sizeof(pe_node_t));
+ pe_node_t *b = calloc(1, sizeof(pe_node_t));
+
+ a->details = calloc(1, sizeof(struct pe_node_shared_s));
+ a->details->id = "id1";
+ b->details = calloc(1, sizeof(struct pe_node_shared_s));
+ b->details->id = "id2";
+
+ nodes = g_list_append(nodes, a);
+ nodes = g_list_append(nodes, b);
+
+ assert_ptr_equal(a, pe_find_node_id(nodes, "id1"));
+ assert_null(pe_find_node_id(nodes, "id10"));
+ assert_null(pe_find_node_id(nodes, "nodeid1"));
+ assert_ptr_equal(b, pe_find_node_id(nodes, "ID2"));
+ assert_null(pe_find_node_id(nodes, "xyz"));
+
+ free(a->details);
+ free(a);
+ free(b->details);
+ free(b);
+ g_list_free(nodes);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(empty_list),
+ cmocka_unit_test(non_null_list))
diff --git a/lib/pengine/tests/status/pe_find_node_test.c b/lib/pengine/tests/status/pe_find_node_test.c
new file mode 100644
index 0000000..7c7ea30
--- /dev/null
+++ b/lib/pengine/tests/status/pe_find_node_test.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+static void
+empty_list(void **state) {
+ assert_null(pe_find_node(NULL, NULL));
+ assert_null(pe_find_node(NULL, "cluster1"));
+}
+
+static void
+non_null_list(void **state) {
+ GList *nodes = NULL;
+
+ pe_node_t *a = calloc(1, sizeof(pe_node_t));
+ pe_node_t *b = calloc(1, sizeof(pe_node_t));
+
+ a->details = calloc(1, sizeof(struct pe_node_shared_s));
+ a->details->uname = "cluster1";
+ b->details = calloc(1, sizeof(struct pe_node_shared_s));
+ b->details->uname = "cluster2";
+
+ nodes = g_list_append(nodes, a);
+ nodes = g_list_append(nodes, b);
+
+ assert_ptr_equal(a, pe_find_node(nodes, "cluster1"));
+ assert_null(pe_find_node(nodes, "cluster10"));
+ assert_null(pe_find_node(nodes, "nodecluster1"));
+ assert_ptr_equal(b, pe_find_node(nodes, "CLUSTER2"));
+ assert_null(pe_find_node(nodes, "xyz"));
+
+ free(a->details);
+ free(a);
+ free(b->details);
+ free(b);
+ g_list_free(nodes);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(empty_list),
+ cmocka_unit_test(non_null_list))
diff --git a/lib/pengine/tests/status/pe_new_working_set_test.c b/lib/pengine/tests/status/pe_new_working_set_test.c
new file mode 100644
index 0000000..cf2df4f
--- /dev/null
+++ b/lib/pengine/tests/status/pe_new_working_set_test.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+#include "mock_private.h"
+
+static void
+calloc_fails(void **state) {
+ pcmk__mock_calloc = true; // calloc() will return NULL
+
+ expect_value(__wrap_calloc, nmemb, 1);
+ expect_value(__wrap_calloc, size, sizeof(pe_working_set_t));
+ assert_null(pe_new_working_set());
+
+ pcmk__mock_calloc = false; // Use real calloc()
+}
+
+static void
+calloc_succeeds(void **state) {
+ pe_working_set_t *data_set = pe_new_working_set();
+
+    /* Nothing else to test about this function, as all it does is call
+     * set_working_set_defaults(), which is also a public function and should
+     * get its own unit test.
+     */
+ assert_non_null(data_set);
+
+ /* Avoid calling pe_free_working_set here so we don't artificially
+ * inflate the coverage numbers.
+ */
+ free(data_set);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(calloc_fails),
+ cmocka_unit_test(calloc_succeeds))
diff --git a/lib/pengine/tests/status/set_working_set_defaults_test.c b/lib/pengine/tests/status/set_working_set_defaults_test.c
new file mode 100644
index 0000000..c822278
--- /dev/null
+++ b/lib/pengine/tests/status/set_working_set_defaults_test.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+#include <crm/pengine/pe_types.h>
+#include <crm/pengine/status.h>
+
+#include "mock_private.h"
+
+static void
+check_defaults(void **state) {
+ uint32_t flags;
+ pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+
+ set_working_set_defaults(data_set);
+
+ flags = pe_flag_stop_rsc_orphans|pe_flag_symmetric_cluster|pe_flag_stop_action_orphans;
+
+ if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
+ flags |= pe_flag_concurrent_fencing;
+ }
+
+ assert_null(data_set->priv);
+ assert_int_equal(data_set->order_id, 1);
+ assert_int_equal(data_set->action_id, 1);
+ assert_int_equal(data_set->no_quorum_policy, no_quorum_stop);
+ assert_int_equal(data_set->flags, flags);
+
+ /* Avoid calling pe_free_working_set here so we don't artificially
+ * inflate the coverage numbers.
+ */
+ free(data_set);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(check_defaults))
diff --git a/lib/pengine/tests/unpack/Makefile.am b/lib/pengine/tests/unpack/Makefile.am
new file mode 100644
index 0000000..baa8633
--- /dev/null
+++ b/lib/pengine/tests/unpack/Makefile.am
@@ -0,0 +1,18 @@
+#
+# Copyright 2022 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+include $(top_srcdir)/mk/tap.mk
+include $(top_srcdir)/mk/unittest.mk
+
+LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
+
+# Add "_test" to the end of all test program names to simplify .gitignore.
+check_PROGRAMS = pe_base_name_end_test
+
+TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/unpack/pe_base_name_end_test.c b/lib/pengine/tests/unpack/pe_base_name_end_test.c
new file mode 100644
index 0000000..6f1c165
--- /dev/null
+++ b/lib/pengine/tests/unpack/pe_base_name_end_test.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+static void
+bad_args(void **state) {
+ assert_null(pe_base_name_end(NULL));
+ assert_null(pe_base_name_end(""));
+}
+
+static void
+no_suffix(void **state) {
+ assert_string_equal(pe_base_name_end("rsc"), "c");
+ assert_string_equal(pe_base_name_end("rsc0"), "0");
+}
+
+static void
+has_suffix(void **state) {
+ assert_string_equal(pe_base_name_end("rsc:0"), "c:0");
+ assert_string_equal(pe_base_name_end("rsc:100"), "c:100");
+}
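+
+/* As these assertions demonstrate, pe_base_name_end() returns a pointer to
+ * the last character of the base name within the given ID, so any clone
+ * suffix (":0", ":100", ...) begins right after the returned position.
+ */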
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(bad_args),
+ cmocka_unit_test(no_suffix),
+ cmocka_unit_test(has_suffix))
diff --git a/lib/pengine/tests/utils/Makefile.am b/lib/pengine/tests/utils/Makefile.am
new file mode 100644
index 0000000..4a3e8a2
--- /dev/null
+++ b/lib/pengine/tests/utils/Makefile.am
@@ -0,0 +1,21 @@
+#
+# Copyright 2022 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+include $(top_srcdir)/mk/tap.mk
+include $(top_srcdir)/mk/unittest.mk
+
+AM_CPPFLAGS += -I$(top_srcdir)/lib/pengine
+LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
+
+# Add "_test" to the end of all test program names to simplify .gitignore.
+check_PROGRAMS = \
+ pe__cmp_node_name_test \
+ pe__cmp_rsc_priority_test
+
+TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/utils/pe__cmp_node_name_test.c b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
new file mode 100644
index 0000000..45d87ee
--- /dev/null
+++ b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+struct pe_node_shared_s node1_details;
+struct pe_node_shared_s node2_details;
+
+pe_node_t node1 = {.details = &node1_details};
+pe_node_t node2 = {.details = &node2_details};
+
+static void
+nodes_equal(void **state)
+{
+ assert_int_equal(pe__cmp_node_name(NULL, NULL), 0);
+
+ node1.details->uname = "node10";
+ node2.details->uname = "node10";
+ assert_int_equal(pe__cmp_node_name(&node1, &node2), 0);
+}
+
+static void
+node1_first(void **state)
+{
+ assert_int_equal(pe__cmp_node_name(NULL, &node2), -1);
+
+ // The heavy testing is done in pcmk__numeric_strcasecmp()'s unit tests
+ node1.details->uname = "node9";
+ node2.details->uname = "node10";
+ assert_int_equal(pe__cmp_node_name(&node1, &node2), -1);
+}
+
+static void
+node2_first(void **state)
+{
+ assert_int_equal(pe__cmp_node_name(&node1, NULL), 1);
+
+ node1.details->uname = "node10";
+ node2.details->uname = "node9";
+ assert_int_equal(pe__cmp_node_name(&node1, &node2), 1);
+}
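+
+/* Taken together, these cases show the comparator contract: NULL sorts
+ * first, equal names compare as 0, and names are compared with numeric
+ * awareness ("node9" before "node10") via pcmk__numeric_strcasecmp().
+ */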
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(nodes_equal),
+ cmocka_unit_test(node1_first),
+ cmocka_unit_test(node2_first))
diff --git a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
new file mode 100644
index 0000000..669e7a9
--- /dev/null
+++ b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/pengine/internal.h>
+
+#include "pe_status_private.h"
+
+pe_resource_t rsc1;
+pe_resource_t rsc2;
+
+static void
+rscs_equal(void **state)
+{
+ rsc1.priority = 0;
+ rsc2.priority = 0;
+ assert_int_equal(pe__cmp_rsc_priority(NULL, NULL), 0);
+ assert_int_equal(pe__cmp_rsc_priority(&rsc1, &rsc2), 0);
+}
+
+static void
+rsc1_first(void **state)
+{
+ rsc1.priority = 1;
+ rsc2.priority = 0;
+ assert_int_equal(pe__cmp_rsc_priority(&rsc1, NULL), -1);
+ assert_int_equal(pe__cmp_rsc_priority(&rsc1, &rsc2), -1);
+}
+
+static void
+rsc2_first(void **state)
+{
+ rsc1.priority = 0;
+ rsc2.priority = 1;
+ assert_int_equal(pe__cmp_rsc_priority(NULL, &rsc2), 1);
+ assert_int_equal(pe__cmp_rsc_priority(&rsc1, &rsc2), 1);
+}
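+
+/* Note the ordering convention these cases establish: the resource with the
+ * higher priority compares as "less" (sorts first), and a NULL resource
+ * sorts after a non-NULL one.
+ */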
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(rscs_equal),
+ cmocka_unit_test(rsc1_first),
+ cmocka_unit_test(rsc2_first))
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
new file mode 100644
index 0000000..2bd6707
--- /dev/null
+++ b/lib/pengine/unpack.c
@@ -0,0 +1,4829 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <glib.h>
+#include <time.h>
+
+#include <crm/crm.h>
+#include <crm/services.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h>
+
+#include <crm/common/util.h>
+#include <crm/pengine/rules.h>
+#include <crm/pengine/internal.h>
+#include <pe_status_private.h>
+
+CRM_TRACE_INIT_DATA(pe_status);
+
+// A (parsed) resource action history entry
+struct action_history {
+ pe_resource_t *rsc; // Resource that history is for
+ pe_node_t *node; // Node that history is for
+ xmlNode *xml; // History entry XML
+
+ // Parsed from entry XML
+ const char *id; // XML ID of history entry
+ const char *key; // Operation key of action
+ const char *task; // Action name
+ const char *exit_reason; // Exit reason given for result
+ guint interval_ms; // Action interval
+ int call_id; // Call ID of action
+ int expected_exit_status; // Expected exit status of action
+ int exit_status; // Actual exit status of action
+ int execution_status; // Execution status of action
+};
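+
+/* For illustration only: entries of this kind are parsed from lrm_rsc_op
+ * elements in the CIB status section, roughly of the form (values made up):
+ *
+ *     <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0"
+ *                 operation="start" call-id="12" rc-code="0" op-status="0"
+ *                 interval="0" exit-reason=""/>
+ */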
+
+/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
+ * use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
+ * flag is stringified more readably in log messages.
+ */
+#define set_config_flag(data_set, option, flag) do { \
+ const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
+ if (scf_value != NULL) { \
+ if (crm_is_true(scf_value)) { \
+ (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Working set", \
+ crm_system_name, (data_set)->flags, \
+ (flag), #flag); \
+ } else { \
+ (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
+ LOG_TRACE, "Working set", \
+ crm_system_name, (data_set)->flags, \
+ (flag), #flag); \
+ } \
+ } \
+ } while(0)
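+
+/* Typical use, as seen later in this file: map a boolean cluster option to a
+ * working-set flag, e.g.
+ *
+ *     set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
+ */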
+
+static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+ xmlNode **last_failure,
+ enum action_fail_response *failed);
+static void determine_remote_online_status(pe_working_set_t *data_set,
+ pe_node_t *this_node);
+static void add_node_attrs(const xmlNode *xml_obj, pe_node_t *node,
+ bool overwrite, pe_working_set_t *data_set);
+static void determine_online_status(const xmlNode *node_state,
+ pe_node_t *this_node,
+ pe_working_set_t *data_set);
+
+static void unpack_node_lrm(pe_node_t *node, const xmlNode *xml,
+ pe_working_set_t *data_set);
+
+
+// Bitmask for warnings we only want to print once
+uint32_t pe_wo = 0;
+
+static gboolean
+is_dangling_guest_node(pe_node_t *node)
+{
+    /* We are looking for a remote node that was supposed to be mapped to a
+     * container resource, but all traces of that container have disappeared
+     * from both the configuration and the status section.
+     */
+ if (pe__is_guest_or_remote_node(node) &&
+ node->details->remote_rsc &&
+ node->details->remote_rsc->container == NULL &&
+ pcmk_is_set(node->details->remote_rsc->flags,
+ pe_rsc_orphan_container_filler)) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*!
+ * \brief Schedule a fence action for a node
+ *
+ * \param[in,out] data_set Current working set of cluster
+ * \param[in,out] node Node to fence
+ * \param[in] reason Text description of why fencing is needed
+ * \param[in] priority_delay Whether to consider `priority-fencing-delay`
+ */
+void
+pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
+ const char *reason, bool priority_delay)
+{
+ CRM_CHECK(node, return);
+
+ /* A guest node is fenced by marking its container as failed */
+ if (pe__is_guest_node(node)) {
+ pe_resource_t *rsc = node->details->remote_rsc->container;
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ crm_notice("Not fencing guest node %s "
+ "(otherwise would because %s): "
+ "its guest resource %s is unmanaged",
+ pe__node_name(node), reason, rsc->id);
+ } else {
+ crm_warn("Guest node %s will be fenced "
+ "(by recovering its guest resource %s): %s",
+ pe__node_name(node), rsc->id, reason);
+
+ /* We don't mark the node as unclean because that would prevent the
+ * node from running resources. We want to allow it to run resources
+ * in this transition if the recovery succeeds.
+ */
+ node->details->remote_requires_reset = TRUE;
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ }
+ }
+
+ } else if (is_dangling_guest_node(node)) {
+ crm_info("Cleaning up dangling connection for guest node %s: "
+ "fencing was already done because %s, "
+ "and guest resource no longer exists",
+ pe__node_name(node), reason);
+ pe__set_resource_flags(node->details->remote_rsc,
+ pe_rsc_failed|pe_rsc_stop);
+
+ } else if (pe__is_remote_node(node)) {
+ pe_resource_t *rsc = node->details->remote_rsc;
+
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ crm_notice("Not fencing remote node %s "
+ "(otherwise would because %s): connection is unmanaged",
+ pe__node_name(node), reason);
+        } else if (!node->details->remote_requires_reset) {
+ node->details->remote_requires_reset = TRUE;
+ crm_warn("Remote node %s %s: %s",
+ pe__node_name(node),
+ pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ reason);
+ }
+ node->details->unclean = TRUE;
+ // No need to apply `priority-fencing-delay` for remote nodes
+ pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
+
+ } else if (node->details->unclean) {
+ crm_trace("Cluster node %s %s because %s",
+ pe__node_name(node),
+ pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
+ reason);
+
+ } else {
+ crm_warn("Cluster node %s %s: %s",
+ pe__node_name(node),
+ pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ reason);
+ node->details->unclean = TRUE;
+ pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
+ }
+}
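+
+/* A sketch of a typical call (the reason text is illustrative only):
+ *
+ *     pe_fence_node(data_set, node, "peer is no longer part of the cluster",
+ *                   false);
+ */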
+
+// @TODO xpaths can't handle templates, rules, or id-refs
+
+// nvpair with provides or requires set to unfencing
+#define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \
+ "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'" \
+ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \
+ "and @" XML_NVPAIR_ATTR_VALUE "='" PCMK__VALUE_UNFENCING "']"
+
+// unfencing in rsc_defaults or any resource
+#define XPATH_ENABLE_UNFENCING \
+ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \
+ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \
+ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \
+ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
+
+static void
+set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
+{
+ xmlXPathObjectPtr result = NULL;
+
+ if (!pcmk_is_set(data_set->flags, flag)) {
+ result = xpath_search(data_set->input, xpath);
+ if (result && (numXpathResults(result) > 0)) {
+ pe__set_working_set_flags(data_set, flag);
+ }
+ freeXpathObject(result);
+ }
+}
+
+gboolean
+unpack_config(xmlNode * config, pe_working_set_t * data_set)
+{
+ const char *value = NULL;
+ GHashTable *config_hash = pcmk__strkey_table(free, free);
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ data_set->config_hash = config_hash;
+
+ pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
+ CIB_OPTIONS_FIRST, FALSE, data_set);
+
+ verify_pe_options(data_set->config_hash);
+
+ set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
+ if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ crm_info("Startup probes: disabled (dangerous)");
+ }
+
+ value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
+ if (value && crm_is_true(value)) {
+ crm_info("Watchdog-based self-fencing will be performed via SBD if "
+ "fencing is required and stonith-watchdog-timeout is nonzero");
+ pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
+ }
+
+ /* Set certain flags via xpath here, so they can be used before the relevant
+ * configuration sections are unpacked.
+ */
+ set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
+
+ value = pe_pref(data_set->config_hash, "stonith-timeout");
+ data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
+ crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
+
+ set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
+ crm_debug("STONITH of failed nodes is %s",
+ pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
+
+ data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
+ if (!strcmp(data_set->stonith_action, "poweroff")) {
+ pe_warn_once(pe_wo_poweroff,
+ "Support for stonith-action of 'poweroff' is deprecated "
+ "and will be removed in a future release (use 'off' instead)");
+ data_set->stonith_action = "off";
+ }
+ crm_trace("STONITH will %s nodes", data_set->stonith_action);
+
+ set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
+ crm_debug("Concurrent fencing is %s",
+ pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
+
+ value = pe_pref(data_set->config_hash,
+ XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
+ if (value) {
+ data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
+ crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
+ }
+
+ set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
+ crm_debug("Stop all active resources: %s",
+ pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
+
+ set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
+ if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
+ }
+
+ value = pe_pref(data_set->config_hash, "no-quorum-policy");
+
+ if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ data_set->no_quorum_policy = no_quorum_ignore;
+
+ } else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
+ data_set->no_quorum_policy = no_quorum_freeze;
+
+ } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ data_set->no_quorum_policy = no_quorum_demote;
+
+ } else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
+ if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ int do_panic = 0;
+
+ crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
+ &do_panic);
+ if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ data_set->no_quorum_policy = no_quorum_suicide;
+ } else {
+ crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
+ data_set->no_quorum_policy = no_quorum_stop;
+ }
+ } else {
+ pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
+ "fencing is disabled");
+ data_set->no_quorum_policy = no_quorum_stop;
+ }
+
+ } else {
+ data_set->no_quorum_policy = no_quorum_stop;
+ }
+
+ switch (data_set->no_quorum_policy) {
+ case no_quorum_freeze:
+ crm_debug("On loss of quorum: Freeze resources");
+ break;
+ case no_quorum_stop:
+ crm_debug("On loss of quorum: Stop ALL resources");
+ break;
+ case no_quorum_demote:
+ crm_debug("On loss of quorum: "
+ "Demote promotable resources and stop other resources");
+ break;
+ case no_quorum_suicide:
+ crm_notice("On loss of quorum: Fence all remaining nodes");
+ break;
+ case no_quorum_ignore:
+ crm_notice("On loss of quorum: Ignore");
+ break;
+ }
+
+ set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
+ crm_trace("Orphan resources are %s",
+ pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
+
+ set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
+ crm_trace("Orphan resource actions are %s",
+ pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
+
+ value = pe_pref(data_set->config_hash, "remove-after-stop");
+ if (value != NULL) {
+ if (crm_is_true(value)) {
+ pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
+#ifndef PCMK__COMPAT_2_0
+ pe_warn_once(pe_wo_remove_after,
+ "Support for the remove-after-stop cluster property is"
+ " deprecated and will be removed in a future release");
+#endif
+ } else {
+ pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
+ }
+ }
+
+ set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
+ crm_trace("Maintenance mode: %s",
+ pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+
+ set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
+ crm_trace("Start failures are %s",
+ pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
+
+ if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
+ }
+ if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ crm_trace("Unseen nodes will be fenced");
+ } else {
+ pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
+ }
+
+ pe__unpack_node_health_scores(data_set);
+
+ data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
+ crm_trace("Placement strategy: %s", data_set->placement_strategy);
+
+ set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
+ crm_trace("Resources will%s be locked to cleanly shut down nodes",
+ (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
+ if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ value = pe_pref(data_set->config_hash,
+ XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
+ data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
+ crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
+ }
+
+ return TRUE;
+}
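+
+/* For reference, a minimal sketch of the kind of crm_config content consumed
+ * above (illustrative only; the id and values here are assumptions):
+ *
+ * <crm_config>
+ * <cluster_property_set id="cib-bootstrap-options">
+ * <nvpair id="opt-stonith" name="stonith-enabled" value="true"/>
+ * <nvpair id="opt-quorum" name="no-quorum-policy" value="stop"/>
+ * </cluster_property_set>
+ * </crm_config>
+ */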
+
+pe_node_t *
+pe_create_node(const char *id, const char *uname, const char *type,
+ const char *score, pe_working_set_t * data_set)
+{
+ pe_node_t *new_node = NULL;
+
+ if (pe_find_node(data_set->nodes, uname) != NULL) {
+ pcmk__config_warn("More than one node entry has name '%s'", uname);
+ }
+
+ new_node = calloc(1, sizeof(pe_node_t));
+ if (new_node == NULL) {
+ return NULL;
+ }
+
+ new_node->weight = char2score(score);
+ new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
+
+ if (new_node->details == NULL) {
+ free(new_node);
+ return NULL;
+ }
+
+ crm_trace("Creating node for entry %s/%s", uname, id);
+ new_node->details->id = id;
+ new_node->details->uname = uname;
+ new_node->details->online = FALSE;
+ new_node->details->shutdown = FALSE;
+ new_node->details->rsc_discovery_enabled = TRUE;
+ new_node->details->running_rsc = NULL;
+ new_node->details->data_set = data_set;
+
+ if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
+ new_node->details->type = node_member;
+
+ } else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
+ new_node->details->type = node_remote;
+ pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
+
+ } else {
+ /* @COMPAT 'ping' is the default for backward compatibility, but it
+ * should be changed to 'member' at a compatibility break
+ */
+ if (!pcmk__str_eq(type, "ping", pcmk__str_casei)) {
+ pcmk__config_warn("Node %s has unrecognized type '%s', "
+ "assuming 'ping'", pcmk__s(uname, "without name"),
+ type);
+ }
+ pe_warn_once(pe_wo_ping_node,
+ "Support for nodes of type 'ping' (such as %s) is "
+ "deprecated and will be removed in a future release",
+ pcmk__s(uname, "unnamed node"));
+ new_node->details->type = node_ping;
+ }
+
+ new_node->details->attrs = pcmk__strkey_table(free, free);
+
+ if (pe__is_guest_or_remote_node(new_node)) {
+ g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
+ strdup("remote"));
+ } else {
+ g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
+ strdup("cluster"));
+ }
+
+ new_node->details->utilization = pcmk__strkey_table(free, free);
+ new_node->details->digest_cache = pcmk__strkey_table(free,
+ pe__free_digests);
+
+ data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node,
+ pe__cmp_node_name);
+ return new_node;
+}
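+
+/* A <node> entry that leads here typically looks like the following
+ * (illustrative sketch; the id and uname are invented). The type attribute
+ * may be "member" (the default), "remote", or the deprecated "ping":
+ *
+ * <node id="1" uname="node1" type="member"/>
+ */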
+
+static const char *
+expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
+{
+ xmlNode *attr_set = NULL;
+ xmlNode *attr = NULL;
+
+ const char *container_id = ID(xml_obj);
+ const char *remote_name = NULL;
+ const char *remote_server = NULL;
+ const char *remote_port = NULL;
+ const char *connect_timeout = "60s";
+ const char *remote_allow_migrate = NULL;
+ const char *is_managed = NULL;
+
+ for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
+ attr_set = pcmk__xe_next(attr_set)) {
+
+ if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS,
+ pcmk__str_casei)) {
+ continue;
+ }
+
+ for (attr = pcmk__xe_first_child(attr_set); attr != NULL;
+ attr = pcmk__xe_next(attr)) {
+ const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
+ const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
+
+ if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) {
+ remote_name = value;
+ } else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) {
+ remote_server = value;
+ } else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) {
+ remote_port = value;
+ } else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) {
+ connect_timeout = value;
+ } else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) {
+ remote_allow_migrate = value;
+ } else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) {
+ is_managed = value;
+ }
+ }
+ }
+
+ if (remote_name == NULL) {
+ return NULL;
+ }
+
+ if (pe_find_resource(data->resources, remote_name) != NULL) {
+ return NULL;
+ }
+
+ pe_create_remote_xml(parent, remote_name, container_id,
+ remote_allow_migrate, is_managed,
+ connect_timeout, remote_server, remote_port);
+ return remote_name;
+}
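+
+/* A guest node is requested via meta-attributes on an ordinary primitive,
+ * roughly like this (hedged sketch; IDs and values are invented):
+ *
+ * <meta_attributes id="vm1-meta_attributes">
+ * <nvpair id="vm1-remote-node" name="remote-node" value="guest1"/>
+ * <nvpair id="vm1-remote-addr" name="remote-addr" value="192.168.122.10"/>
+ * </meta_attributes>
+ *
+ * expand_remote_rsc_meta() then generates an implicit ocf:pacemaker:remote
+ * primitive named "guest1" for the connection.
+ */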
+
+static void
+handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
+{
+ if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
+ /* Ignore fencing for remote nodes that don't have a connection resource
+ * associated with them. This happens when remote node entries get left
+ * in the nodes section after the connection resource is removed.
+ */
+ return;
+ }
+
+ if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ // All nodes are unclean until we've seen their status entry
+ new_node->details->unclean = TRUE;
+
+ } else {
+ // Blind faith ...
+ new_node->details->unclean = FALSE;
+ }
+
+ /* We need to be able to determine whether a node's status section exists,
+ * separately from whether the node is unclean. */
+ new_node->details->unseen = TRUE;
+}
+
+gboolean
+unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
+{
+ xmlNode *xml_obj = NULL;
+ pe_node_t *new_node = NULL;
+ const char *id = NULL;
+ const char *uname = NULL;
+ const char *type = NULL;
+ const char *score = NULL;
+
+ for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL;
+ xml_obj = pcmk__xe_next(xml_obj)) {
+
+ if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) {
+ new_node = NULL;
+
+ id = crm_element_value(xml_obj, XML_ATTR_ID);
+ uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
+ type = crm_element_value(xml_obj, XML_ATTR_TYPE);
+ score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
+ crm_trace("Processing node %s/%s", uname, id);
+
+ if (id == NULL) {
+ pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE
+ "> entry in configuration without id");
+ continue;
+ }
+ new_node = pe_create_node(id, uname, type, score, data_set);
+
+ if (new_node == NULL) {
+ return FALSE;
+ }
+
+ handle_startup_fencing(data_set, new_node);
+
+ add_node_attrs(xml_obj, new_node, FALSE, data_set);
+
+ crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
+ }
+ }
+
+ if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
+ crm_info("Creating a fake local node");
+ pe_create_node(data_set->localhost, data_set->localhost, NULL, NULL,
+ data_set);
+ }
+
+ return TRUE;
+}
+
+static void
+setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
+{
+ const char *container_id = NULL;
+
+ if (rsc->children) {
+ g_list_foreach(rsc->children, (GFunc) setup_container, data_set);
+ return;
+ }
+
+ container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
+ if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
+ pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
+
+ if (container) {
+ rsc->container = container;
+ pe__set_resource_flags(container, pe_rsc_is_container);
+ container->fillers = g_list_append(container->fillers, rsc);
+ pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
+ } else {
+ pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id);
+ }
+ }
+}
+
+gboolean
+unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+{
+ xmlNode *xml_obj = NULL;
+
+ /* Create remote nodes and guest nodes from the resource configuration
+ * before unpacking resources.
+ */
+ for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
+ xml_obj = pcmk__xe_next(xml_obj)) {
+
+ const char *new_node_id = NULL;
+
+ /* Check for remote nodes, which are defined by ocf:pacemaker:remote
+ * primitives.
+ */
+ if (xml_contains_remote_node(xml_obj)) {
+ new_node_id = ID(xml_obj);
+ /* The "pe_find_node" check is here to make sure we don't iterate over
+ * an expanded node that has already been added to the node list. */
+ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ crm_trace("Found remote node %s defined by resource %s",
+ new_node_id, ID(xml_obj));
+ pe_create_node(new_node_id, new_node_id, "remote", NULL,
+ data_set);
+ }
+ continue;
+ }
+
+ /* Check for guest nodes, which are defined by special meta-attributes
+ * of a primitive of any type (for example, VirtualDomain or Xen).
+ */
+ if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
+ /* This will add an ocf:pacemaker:remote primitive to the
+ * configuration for the guest node's connection, to be unpacked
+ * later.
+ */
+ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
+ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ crm_trace("Found guest node %s in resource %s",
+ new_node_id, ID(xml_obj));
+ pe_create_node(new_node_id, new_node_id, "remote", NULL,
+ data_set);
+ }
+ continue;
+ }
+
+ /* Check for guest nodes inside a group. Clones are currently not
+ * supported as guest nodes.
+ */
+ if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) {
+ xmlNode *xml_obj2 = NULL;
+ for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
+ xml_obj2 = pcmk__xe_next(xml_obj2)) {
+
+ new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
+
+ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ crm_trace("Found guest node %s in resource %s inside group %s",
+ new_node_id, ID(xml_obj2), ID(xml_obj));
+ pe_create_node(new_node_id, new_node_id, "remote", NULL,
+ data_set);
+ }
+ }
+ }
+ }
+ return TRUE;
+}
+
+/* Call this after all the nodes and resources have been unpacked, but before
+ * the status section is read.
+ *
+ * A remote node's online status is reflected by the state of its connection
+ * resource, so link the remote node to that resource here to give the
+ * scheduler easy access to it during calculations.
+ */
+static void
+link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
+{
+ pe_node_t *remote_node = NULL;
+
+ if (new_rsc->is_remote_node == FALSE) {
+ return;
+ }
+
+ if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ /* remote_nodes and remote_resources are not linked in quick location calculations */
+ return;
+ }
+
+ remote_node = pe_find_node(data_set->nodes, new_rsc->id);
+ CRM_CHECK(remote_node != NULL, return);
+
+ pe_rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
+ new_rsc->id, pe__node_name(remote_node));
+ remote_node->details->remote_rsc = new_rsc;
+
+ if (new_rsc->container == NULL) {
+ /* Handle start-up fencing for remote nodes (as opposed to guest nodes)
+ * the same as is done for cluster nodes.
+ */
+ handle_startup_fencing(data_set, remote_node);
+
+ } else {
+ /* pe_create_node() marks the new node as "remote" or "cluster"; now
+ * that we know the node is a guest node, update it correctly.
+ */
+ g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND),
+ strdup("container"));
+ }
+}
+
+static void
+destroy_tag(gpointer data)
+{
+ pe_tag_t *tag = data;
+
+ if (tag) {
+ free(tag->id);
+ g_list_free_full(tag->refs, free);
+ free(tag);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Parse configuration XML for resource information
+ *
+ * \param[in] xml_resources Top of resource configuration XML
+ * \param[in,out] data_set Where to put resource information
+ *
+ * \return TRUE
+ *
+ * \note unpack_remote_nodes() MUST be called before this, so that the nodes can
+ * be used when pe__unpack_resource() calls resource_location()
+ */
+gboolean
+unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
+{
+ xmlNode *xml_obj = NULL;
+ GList *gIter = NULL;
+
+ data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
+
+ for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
+ xml_obj = pcmk__xe_next(xml_obj)) {
+
+ pe_resource_t *new_rsc = NULL;
+ const char *id = ID(xml_obj);
+
+ if (pcmk__str_empty(id)) {
+ pcmk__config_err("Ignoring <%s> resource without ID",
+ crm_element_name(xml_obj));
+ continue;
+ }
+
+ if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE,
+ pcmk__str_none)) {
+ if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ NULL, NULL) == FALSE) {
+ /* Record the template's ID so we at least know it exists. */
+ g_hash_table_insert(data_set->template_rsc_sets, strdup(id), NULL);
+ }
+ continue;
+ }
+
+ crm_trace("Unpacking <%s " XML_ATTR_ID "='%s'>",
+ crm_element_name(xml_obj), id);
+ if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
+ data_set) == pcmk_rc_ok) {
+ data_set->resources = g_list_append(data_set->resources, new_rsc);
+ pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
+
+ } else {
+ pcmk__config_err("Ignoring <%s> resource '%s' "
+ "because configuration is invalid",
+ crm_element_name(xml_obj), id);
+ }
+ }
+
+ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+
+ setup_container(rsc, data_set);
+ link_rsc2remotenode(data_set, rsc);
+ }
+
+ data_set->resources = g_list_sort(data_set->resources,
+ pe__cmp_rsc_priority);
+ if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ /* Ignore */
+
+ } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
+ && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+
+ pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
+ pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
+ pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
+ }
+
+ return TRUE;
+}
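+
+/* Sketch of the input unpack_resources() walks (illustrative only): every
+ * child of <resources> is unpacked, except templates, which are merely
+ * recorded by ID:
+ *
+ * <resources>
+ * <template id="base-dummy" class="ocf" provider="pacemaker" type="Dummy"/>
+ * <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+ * </resources>
+ */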
+
+gboolean
+unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+{
+ xmlNode *xml_tag = NULL;
+
+ data_set->tags = pcmk__strkey_table(free, destroy_tag);
+
+ for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
+ xml_tag = pcmk__xe_next(xml_tag)) {
+
+ xmlNode *xml_obj_ref = NULL;
+ const char *tag_id = ID(xml_tag);
+
+ if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) {
+ continue;
+ }
+
+ if (tag_id == NULL) {
+ pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
+ crm_element_name(xml_tag));
+ continue;
+ }
+
+ for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL;
+ xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
+
+ const char *obj_ref = ID(xml_obj_ref);
+
+ if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) {
+ continue;
+ }
+
+ if (obj_ref == NULL) {
+ pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
+ crm_element_name(xml_obj_ref), tag_id);
+ continue;
+ }
+
+ if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
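+
+/* The tags section handled above looks like this (hedged example; the IDs
+ * are invented for illustration):
+ *
+ * <tags>
+ * <tag id="all-databases">
+ * <obj_ref id="db1"/>
+ * <obj_ref id="db2"/>
+ * </tag>
+ * </tags>
+ */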
+
+/* The ticket state section:
+ * "/cib/status/tickets/ticket_state" */
+static gboolean
+unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
+{
+ const char *ticket_id = NULL;
+ const char *granted = NULL;
+ const char *last_granted = NULL;
+ const char *standby = NULL;
+ xmlAttrPtr xIter = NULL;
+
+ pe_ticket_t *ticket = NULL;
+
+ ticket_id = ID(xml_ticket);
+ if (pcmk__str_empty(ticket_id)) {
+ return FALSE;
+ }
+
+ crm_trace("Processing ticket state for %s", ticket_id);
+
+ ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ if (ticket == NULL) {
+ ticket = ticket_new(ticket_id, data_set);
+ if (ticket == NULL) {
+ return FALSE;
+ }
+ }
+
+ for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
+ const char *prop_name = (const char *)xIter->name;
+ const char *prop_value = crm_element_value(xml_ticket, prop_name);
+
+ if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
+ continue;
+ }
+ g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
+ }
+
+ granted = g_hash_table_lookup(ticket->state, "granted");
+ if (granted && crm_is_true(granted)) {
+ ticket->granted = TRUE;
+ crm_info("We have ticket '%s'", ticket->id);
+ } else {
+ ticket->granted = FALSE;
+ crm_info("We do not have ticket '%s'", ticket->id);
+ }
+
+ last_granted = g_hash_table_lookup(ticket->state, "last-granted");
+ if (last_granted) {
+ long long last_granted_ll;
+
+ pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
+ ticket->last_granted = (time_t) last_granted_ll;
+ }
+
+ standby = g_hash_table_lookup(ticket->state, "standby");
+ if (standby && crm_is_true(standby)) {
+ ticket->standby = TRUE;
+ if (ticket->granted) {
+ crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
+ }
+ } else {
+ ticket->standby = FALSE;
+ }
+
+ crm_trace("Done with ticket state for %s", ticket_id);
+
+ return TRUE;
+}
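+
+/* A ticket_state entry as consumed above might look like this (illustrative
+ * sketch; the id and timestamp are assumptions):
+ *
+ * <ticket_state id="ticketA" granted="true" last-granted="1672531200"/>
+ *
+ * Every attribute except id is copied into ticket->state; "granted",
+ * "last-granted", and "standby" then get special handling.
+ */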
+
+static gboolean
+unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
+{
+ xmlNode *xml_obj = NULL;
+
+ for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL;
+ xml_obj = pcmk__xe_next(xml_obj)) {
+
+ if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
+ continue;
+ }
+ unpack_ticket_state(xml_obj, data_set);
+ }
+
+ return TRUE;
+}
+
+static void
+unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
+ pe_working_set_t *data_set)
+{
+ const char *resource_discovery_enabled = NULL;
+ const xmlNode *attrs = NULL;
+ pe_resource_t *rsc = NULL;
+
+ if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
+ return;
+ }
+
+ if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) {
+ return;
+ }
+ crm_trace("Processing Pacemaker Remote node %s", pe__node_name(this_node));
+
+ pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_MAINTENANCE),
+ &(this_node->details->remote_maintenance), 0);
+
+ rsc = this_node->details->remote_rsc;
+ if (this_node->details->remote_requires_reset == FALSE) {
+ this_node->details->unclean = FALSE;
+ this_node->details->unseen = FALSE;
+ }
+ attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
+ add_node_attrs(attrs, this_node, TRUE, data_set);
+
+ if (pe__shutdown_requested(this_node)) {
+ crm_info("%s is shutting down", pe__node_name(this_node));
+ this_node->details->shutdown = TRUE;
+ }
+
+ if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
+ crm_info("%s is in standby mode", pe__node_name(this_node));
+ this_node->details->standby = TRUE;
+ }
+
+ if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
+ ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
+ crm_info("%s is in maintenance mode", pe__node_name(this_node));
+ this_node->details->maintenance = TRUE;
+ }
+
+ resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
+ if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
+ if (pe__is_remote_node(this_node)
+ && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
+ " attribute on Pacemaker Remote node %s"
+ " because fencing is disabled",
+ pe__node_name(this_node));
+ } else {
+ /* This is either a remote node with fencing enabled, or a guest
+ * node. We don't care whether fencing is enabled when fencing guest
+ * nodes, because they are "fenced" by recovering their containing
+ * resource.
+ */
+ crm_info("%s has resource discovery disabled",
+ pe__node_name(this_node));
+ this_node->details->rsc_discovery_enabled = FALSE;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack a cluster node's transient attributes
+ *
+ * \param[in] state CIB node state XML
+ * \param[in,out] node Cluster node whose attributes are being unpacked
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
+ pe_working_set_t *data_set)
+{
+ const char *discovery = NULL;
+ const xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS,
+ FALSE);
+
+ add_node_attrs(attrs, node, TRUE, data_set);
+
+ if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
+ crm_info("%s is in standby mode", pe__node_name(node));
+ node->details->standby = TRUE;
+ }
+
+ if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) {
+ crm_info("%s is in maintenance mode", pe__node_name(node));
+ node->details->maintenance = TRUE;
+ }
+
+ discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY);
+ if ((discovery != NULL) && !crm_is_true(discovery)) {
+ crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
+ " attribute for %s because disabling resource discovery "
+ "is not allowed for cluster nodes", pe__node_name(node));
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack a node state entry (first pass)
+ *
+ * Unpack one node state entry from status. This unpacks information from the
+ * node_state element itself and node attributes inside it, but not the
+ * resource history inside it. Multiple passes through the status are needed to
+ * fully unpack everything.
+ *
+ * \param[in] state CIB node state XML
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
+{
+ const char *id = NULL;
+ const char *uname = NULL;
+ pe_node_t *this_node = NULL;
+
+ id = crm_element_value(state, XML_ATTR_ID);
+ if (id == NULL) {
+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
+ XML_ATTR_ID);
+ return;
+ }
+
+ uname = crm_element_value(state, XML_ATTR_UNAME);
+ if (uname == NULL) {
+ crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
+ XML_ATTR_UNAME);
+ return;
+ }
+
+ this_node = pe_find_node_any(data_set->nodes, id, uname);
+ if (this_node == NULL) {
+ pcmk__config_warn("Ignoring recorded node state for '%s' because "
+ "it is no longer in the configuration", uname);
+ return;
+ }
+
+ if (pe__is_guest_or_remote_node(this_node)) {
+ /* We can't determine the online status of Pacemaker Remote nodes until
+ * after all resource history has been unpacked. In this first pass, we
+ * do need to mark whether the node has been fenced, as this plays a
+ * role during unpacking cluster node resource state.
+ */
+ pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_FENCED),
+ &(this_node->details->remote_was_fenced), 0);
+ return;
+ }
+
+ unpack_transient_attributes(state, this_node, data_set);
+
+ /* Provisionally mark this cluster node as clean. We have at least seen it
+ * in the current cluster's lifetime.
+ */
+ this_node->details->unclean = FALSE;
+ this_node->details->unseen = FALSE;
+
+ crm_trace("Determining online status of cluster node %s (id %s)",
+ pe__node_name(this_node), id);
+ determine_online_status(state, this_node, data_set);
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
+ && this_node->details->online
+ && (data_set->no_quorum_policy == no_quorum_suicide)) {
+ /* Everything else should flow from this automatically
+ * (at least until the scheduler becomes able to migrate off
+ * healthy resources)
+ */
+ pe_fence_node(data_set, this_node, "cluster does not have quorum",
+ FALSE);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack nodes' resource history as much as possible
+ *
+ * Unpack as many nodes' resource history as possible in one pass through the
+ * status. We need to process Pacemaker Remote nodes' connections/containers
+ * before unpacking their history; the connection/container history will be
+ * in another node's history, so it might take multiple passes to unpack
+ * everything.
+ *
+ * \param[in] status CIB XML status section
+ * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
+ * or EAGAIN if more unpacking remains to be done)
+ */
+static int
+unpack_node_history(const xmlNode *status, bool fence,
+ pe_working_set_t *data_set)
+{
+ int rc = pcmk_rc_ok;
+
+ // Loop through all node_state entries in CIB status
+ for (const xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE);
+ state != NULL; state = crm_next_same_xml(state)) {
+
+ const char *id = ID(state);
+ const char *uname = crm_element_value(state, XML_ATTR_UNAME);
+ pe_node_t *this_node = NULL;
+
+ if ((id == NULL) || (uname == NULL)) {
+ // Warning already logged in first pass through status section
+ crm_trace("Not unpacking resource history from malformed "
+ XML_CIB_TAG_STATE " without id and/or uname");
+ continue;
+ }
+
+ this_node = pe_find_node_any(data_set->nodes, id, uname);
+ if (this_node == NULL) {
+ // Warning already logged in first pass through status section
+ crm_trace("Not unpacking resource history for node %s because "
+ "no longer in configuration", id);
+ continue;
+ }
+
+ if (this_node->details->unpacked) {
+ crm_trace("Not unpacking resource history for node %s because "
+ "already unpacked", id);
+ continue;
+ }
+
+ if (fence) {
+ // We're processing all remaining nodes
+
+ } else if (pe__is_guest_node(this_node)) {
+ /* We can unpack a guest node's history only after we've unpacked
+ * other resource history to the point that we know that the node's
+ * connection and containing resource are both up.
+ */
+ pe_resource_t *rsc = this_node->details->remote_rsc;
+
+ if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
+ || (rsc->container->role != RSC_ROLE_STARTED)) {
+ crm_trace("Not unpacking resource history for guest node %s "
+ "because container and connection are not known to "
+ "be up", id);
+ continue;
+ }
+
+ } else if (pe__is_remote_node(this_node)) {
+ /* We can unpack a remote node's history only after we've unpacked
+ * other resource history to the point that we know that the node's
+ * connection is up, with the exception of when shutdown locks are
+ * in use.
+ */
+ pe_resource_t *rsc = this_node->details->remote_rsc;
+
+ if ((rsc == NULL)
+ || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
+ && (rsc->role != RSC_ROLE_STARTED))) {
+ crm_trace("Not unpacking resource history for remote node %s "
+ "because connection is not known to be up", id);
+ continue;
+ }
+
+ /* If fencing and shutdown locks are disabled and we're not processing
+ * unseen nodes, then we don't want to unpack offline nodes until online
+ * nodes have been unpacked. This allows us to number active clone
+ * instances first.
+ */
+ } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
+ |pe_flag_shutdown_lock)
+ && !this_node->details->online) {
+ crm_trace("Not unpacking resource history for offline "
+ "cluster node %s", id);
+ continue;
+ }
+
+ if (pe__is_guest_or_remote_node(this_node)) {
+ determine_remote_online_status(data_set, this_node);
+ unpack_handle_remote_attrs(this_node, state, data_set);
+ }
+
+ crm_trace("Unpacking resource history for %snode %s",
+ (fence? "unseen " : ""), id);
+
+ this_node->details->unpacked = TRUE;
+ unpack_node_lrm(this_node, state, data_set);
+
+ rc = EAGAIN; // Other node histories might depend on this one
+ }
+ return rc;
+}
+
+/* Unpack the status section: determine which nodes are down or shutting down,
+ * and record where each resource is running (in effect, positive
+ * resource-to-node constraints for the current placements).
+ */
+gboolean
+unpack_status(xmlNode * status, pe_working_set_t * data_set)
+{
+ xmlNode *state = NULL;
+
+ crm_trace("Beginning unpack");
+
+ if (data_set->tickets == NULL) {
+ data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ }
+
+ for (state = pcmk__xe_first_child(status); state != NULL;
+ state = pcmk__xe_next(state)) {
+
+ if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
+ unpack_tickets_state((xmlNode *) state, data_set);
+
+ } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
+ unpack_node_state(state, data_set);
+ }
+ }
+
+ while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
+ crm_trace("Another pass through node resource histories is needed");
+ }
+
+ // Now catch any nodes we didn't see
+ unpack_node_history(status,
+ pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
+ data_set);
+
+ /* Now that we know where resources are, we can schedule stops of containers
+ * with failed bundle connections
+ */
+ if (data_set->stop_needed != NULL) {
+ for (GList *item = data_set->stop_needed; item; item = item->next) {
+ pe_resource_t *container = item->data;
+ pe_node_t *node = pe__current_node(container);
+
+ if (node) {
+ stop_action(container, node, FALSE);
+ }
+ }
+ g_list_free(data_set->stop_needed);
+ data_set->stop_needed = NULL;
+ }
+
+ /* Now that we know status of all Pacemaker Remote connections and nodes,
+ * we can stop connections for node shutdowns, and check the online status
+ * of remote/guest nodes that didn't have any node history to unpack.
+ */
+ for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *this_node = gIter->data;
+
+ if (!pe__is_guest_or_remote_node(this_node)) {
+ continue;
+ }
+ if (this_node->details->shutdown
+ && (this_node->details->remote_rsc != NULL)) {
+ pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
+ "remote shutdown");
+ }
+ if (!this_node->details->unpacked) {
+ determine_remote_online_status(data_set, this_node);
+ }
+ }
+
+ return TRUE;
+}
+
+static gboolean
+determine_online_status_no_fencing(pe_working_set_t *data_set,
+ const xmlNode *node_state,
+ pe_node_t *this_node)
+{
+ gboolean online = FALSE;
+ const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
+ const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
+ const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
+ const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+
+ if (!crm_is_true(in_cluster)) {
+ crm_trace("Node is down: in_cluster=%s",
+ pcmk__s(in_cluster, "<null>"));
+
+ } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
+ if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ online = TRUE;
+ } else {
+ crm_debug("Node is not ready to run resources: %s", join);
+ }
+
+ } else if (this_node->details->expected_up == FALSE) {
+ crm_trace("Controller is down: "
+ "in_cluster=%s is_peer=%s join=%s expected=%s",
+ pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
+
+ } else {
+ /* mark it unclean */
+ pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
+ crm_info("in_cluster=%s is_peer=%s join=%s expected=%s",
+ pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
+ }
+ return online;
+}
+
+static gboolean
+determine_online_status_fencing(pe_working_set_t *data_set,
+ const xmlNode *node_state, pe_node_t *this_node)
+{
+ gboolean online = FALSE;
+ gboolean do_terminate = FALSE;
+ bool crmd_online = FALSE;
+ const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
+ const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
+ const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
+ const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *terminate = pe_node_attribute_raw(this_node, "terminate");
+
+/*
+ - XML_NODE_IN_CLUSTER ::= true|false
+ - XML_NODE_IS_PEER ::= online|offline
+ - XML_NODE_JOIN_STATE ::= member|down|pending|banned
+ - XML_NODE_EXPECTED ::= member|down
+*/
+
+ if (crm_is_true(terminate)) {
+ do_terminate = TRUE;
+
+ } else if (terminate != NULL && strlen(terminate) > 0) {
+ /* could be a time() value */
+ char t = terminate[0];
+
+ if (t != '0' && isdigit(t)) {
+ do_terminate = TRUE;
+ }
+ }
+
+ crm_trace("%s: in_cluster=%s is_peer=%s join=%s expected=%s term=%d",
+ pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
+ pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
+ pcmk__s(exp_state, "<null>"), do_terminate);
+
+ online = crm_is_true(in_cluster);
+ crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
+ if (exp_state == NULL) {
+ exp_state = CRMD_JOINSTATE_DOWN;
+ }
+
+ if (this_node->details->shutdown) {
+ crm_debug("%s is shutting down", pe__node_name(this_node));
+
+ /* Slightly different criteria since we can't shut down a dead peer */
+ online = crmd_online;
+
+ } else if (in_cluster == NULL) {
+ pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
+
+ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
+ pe_fence_node(data_set, this_node,
+ "peer failed Pacemaker membership criteria", FALSE);
+
+ } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
+
+ if (crm_is_true(in_cluster) || crmd_online) {
+ crm_info("- %s is not ready to run resources",
+ pe__node_name(this_node));
+ this_node->details->standby = TRUE;
+ this_node->details->pending = TRUE;
+
+ } else {
+ crm_trace("%s is down or still coming up",
+ pe__node_name(this_node));
+ }
+
+ } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
+ && crm_is_true(in_cluster) == FALSE && !crmd_online) {
+ crm_info("%s was just shot", pe__node_name(this_node));
+ online = FALSE;
+
+ } else if (crm_is_true(in_cluster) == FALSE) {
+ // Consider `priority-fencing-delay` for lost nodes
+ pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
+
+ } else if (!crmd_online) {
+ pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
+
+ /* Everything is running at this point, now check join state */
+ } else if (do_terminate) {
+ pe_fence_node(data_set, this_node, "termination was requested", FALSE);
+
+ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ crm_info("%s is active", pe__node_name(this_node));
+
+ } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
+ crm_info("%s is not ready to run resources", pe__node_name(this_node));
+ this_node->details->standby = TRUE;
+ this_node->details->pending = TRUE;
+
+ } else {
+ pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
+ crm_warn("%s: in-cluster=%s is-peer=%s join=%s expected=%s term=%d shutdown=%d",
+ pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
+ pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
+ pcmk__s(exp_state, "<null>"), do_terminate,
+ this_node->details->shutdown);
+ }
+
+ return online;
+}
+
+static void
+determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
+{
+ pe_resource_t *rsc = this_node->details->remote_rsc;
+ pe_resource_t *container = NULL;
+ pe_node_t *host = NULL;
+
+ /* If there is a node state entry for a (former) Pacemaker Remote node
+ * but no resource creating that node, the node's connection resource will
+ * be NULL. Consider it an offline remote node in that case.
+ */
+ if (rsc == NULL) {
+ this_node->details->online = FALSE;
+ goto remote_online_done;
+ }
+
+ container = rsc->container;
+
+ if (container && pcmk__list_of_1(rsc->running_on)) {
+ host = rsc->running_on->data;
+ }
+
+ /* If the resource is currently started, mark it online. */
+ if (rsc->role == RSC_ROLE_STARTED) {
+ crm_trace("%s node %s presumed ONLINE because connection resource is started",
+ (container? "Guest" : "Remote"), this_node->details->id);
+ this_node->details->online = TRUE;
+ }
+
+ /* consider this node shutting down if transitioning start->stop */
+ if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
+ crm_trace("%s node %s shutting down because connection resource is stopping",
+ (container? "Guest" : "Remote"), this_node->details->id);
+ this_node->details->shutdown = TRUE;
+ }
+
+ /* Now check all the failure conditions. */
+ if (container && pcmk_is_set(container->flags, pe_rsc_failed)) {
+ crm_trace("Guest node %s UNCLEAN because guest resource failed",
+ this_node->details->id);
+ this_node->details->online = FALSE;
+ this_node->details->remote_requires_reset = TRUE;
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ crm_trace("%s node %s OFFLINE because connection resource failed",
+ (container? "Guest" : "Remote"), this_node->details->id);
+ this_node->details->online = FALSE;
+
+ } else if (rsc->role == RSC_ROLE_STOPPED
+ || (container && container->role == RSC_ROLE_STOPPED)) {
+
+ crm_trace("%s node %s OFFLINE because its resource is stopped",
+ (container? "Guest" : "Remote"), this_node->details->id);
+ this_node->details->online = FALSE;
+ this_node->details->remote_requires_reset = FALSE;
+
+ } else if (host && (host->details->online == FALSE)
+ && host->details->unclean) {
+ crm_trace("Guest node %s UNCLEAN because host is unclean",
+ this_node->details->id);
+ this_node->details->online = FALSE;
+ this_node->details->remote_requires_reset = TRUE;
+ }
+
+remote_online_done:
+ crm_trace("Remote node %s online=%s",
+ this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
+}
+
+static void
+determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
+ pe_working_set_t *data_set)
+{
+ gboolean online = FALSE;
+ const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+
+ CRM_CHECK(this_node != NULL, return);
+
+ this_node->details->shutdown = FALSE;
+ this_node->details->expected_up = FALSE;
+
+ if (pe__shutdown_requested(this_node)) {
+ this_node->details->shutdown = TRUE;
+
+ } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ this_node->details->expected_up = TRUE;
+ }
+
+ if (this_node->details->type == node_ping) {
+ this_node->details->unclean = FALSE;
+ online = FALSE; /* As far as resource management is concerned,
+ * the node is safely offline.
+ * Anyone caught abusing this logic will be shot
+ */
+
+ } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ online = determine_online_status_no_fencing(data_set, node_state, this_node);
+
+ } else {
+ online = determine_online_status_fencing(data_set, node_state, this_node);
+ }
+
+ if (online) {
+ this_node->details->online = TRUE;
+
+ } else {
+ /* remove node from contention */
+ this_node->fixed = TRUE; // @COMPAT deprecated and unused
+ this_node->weight = -INFINITY;
+ }
+
+ if (online && this_node->details->shutdown) {
+ /* don't run resources here */
+ this_node->fixed = TRUE; // @COMPAT deprecated and unused
+ this_node->weight = -INFINITY;
+ }
+
+ if (this_node->details->type == node_ping) {
+ crm_info("%s is not a Pacemaker node", pe__node_name(this_node));
+
+ } else if (this_node->details->unclean) {
+ pe_proc_warn("%s is unclean", pe__node_name(this_node));
+
+ } else if (this_node->details->online) {
+ crm_info("%s is %s", pe__node_name(this_node),
+ this_node->details->shutdown ? "shutting down" :
+ this_node->details->pending ? "pending" :
+ this_node->details->standby ? "standby" :
+ this_node->details->maintenance ? "maintenance" : "online");
+
+ } else {
+ crm_trace("%s is offline", pe__node_name(this_node));
+ }
+}
+
+/*!
+ * \internal
+ * \brief Find the end of a resource's name, excluding any clone suffix
+ *
+ * \param[in] id Resource ID to check
+ *
+ * \return Pointer to last character of resource's base name
+ */
+const char *
+pe_base_name_end(const char *id)
+{
+ if (!pcmk__str_empty(id)) {
+ const char *end = id + strlen(id) - 1;
+
+ for (const char *s = end; s > id; --s) {
+ switch (*s) {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ break;
+ case ':':
+ return (s == end)? s : (s - 1);
+ default:
+ return end;
+ }
+ }
+ return end;
+ }
+ return NULL;
+}
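+
+/* Illustrative examples (not part of the original source):
+ * pe_base_name_end("rsc:0") -> pointer to the final 'c' of "rsc"
+ * pe_base_name_end("rsc") -> pointer to the final 'c'
+ * pe_base_name_end("rsc:a") -> pointer to the final 'a' (no clone suffix)
+ */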
+
+/*!
+ * \internal
+ * \brief Get a resource name excluding any clone suffix
+ *
+ * \param[in] last_rsc_id Resource ID to check
+ *
+ * \return Pointer to newly allocated string with resource's base name
+ * \note It is the caller's responsibility to free() the result.
+ * This asserts on error, so callers can assume result is not NULL.
+ */
+char *
+clone_strip(const char *last_rsc_id)
+{
+ const char *end = pe_base_name_end(last_rsc_id);
+ char *basename = NULL;
+
+ CRM_ASSERT(end);
+ basename = strndup(last_rsc_id, end - last_rsc_id + 1);
+ CRM_ASSERT(basename);
+ return basename;
+}
+
+/*!
+ * \internal
+ * \brief Get the name of the first instance of a cloned resource
+ *
+ * \param[in] last_rsc_id Resource ID to check
+ *
+ * \return Pointer to newly allocated string with resource's base name plus :0
+ * \note It is the caller's responsibility to free() the result.
+ * This asserts on error, so callers can assume result is not NULL.
+ */
+char *
+clone_zero(const char *last_rsc_id)
+{
+ const char *end = pe_base_name_end(last_rsc_id);
+ size_t base_name_len = end - last_rsc_id + 1;
+ char *zero = NULL;
+
+ CRM_ASSERT(end);
+ zero = calloc(base_name_len + 3, sizeof(char));
+ CRM_ASSERT(zero);
+ memcpy(zero, last_rsc_id, base_name_len);
+ zero[base_name_len] = ':';
+ zero[base_name_len + 1] = '0';
+ return zero;
+}
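+
+/* Usage sketch for the two helpers above (illustrative, not from the
+ * original source):
+ *
+ * char *base = clone_strip("galera:2"); // -> "galera"
+ * char *zero = clone_zero("galera:2"); // -> "galera:0"
+ *
+ * free(base);
+ * free(zero);
+ */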
+
+static pe_resource_t *
+create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
+ pe_working_set_t *data_set)
+{
+ pe_resource_t *rsc = NULL;
+ xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
+
+ copy_in_properties(xml_rsc, rsc_entry);
+ crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
+ crm_log_xml_debug(xml_rsc, "Orphan resource");
+
+ if (pe__unpack_resource(xml_rsc, &rsc, NULL, data_set) != pcmk_rc_ok) {
+ return NULL;
+ }
+
+ if (xml_contains_remote_node(xml_rsc)) {
+ pe_node_t *node;
+
+ crm_debug("Detected orphaned remote node %s", rsc_id);
+ node = pe_find_node(data_set->nodes, rsc_id);
+ if (node == NULL) {
+ node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
+ }
+ link_rsc2remotenode(data_set, rsc);
+
+ if (node) {
+ crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
+ node->details->shutdown = TRUE;
+ }
+ }
+
+ if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
+ /* This orphaned rsc needs to be mapped to a container. */
+ crm_trace("Detected orphaned container filler %s", rsc_id);
+ pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
+ }
+ pe__set_resource_flags(rsc, pe_rsc_orphan);
+ data_set->resources = g_list_append(data_set->resources, rsc);
+ return rsc;
+}
+
+/*!
+ * \internal
+ * \brief Create orphan instance for anonymous clone resource history
+ *
+ * \param[in,out] parent Clone resource that orphan will be added to
+ * \param[in] rsc_id Orphan's resource ID
+ * \param[in] node Where orphan is active (for logging only)
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Newly added orphaned instance of \p parent
+ */
+static pe_resource_t *
+create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
+ const pe_node_t *node, pe_working_set_t *data_set)
+{
+ pe_resource_t *top = pe__create_clone_child(parent, data_set);
+
+ // find_rsc() because we might be a cloned group
+ pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
+
+ pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
+ top->id, parent->id, rsc_id, pe__node_name(node));
+ return orphan;
+}
+
+/*!
+ * \internal
+ * \brief Check a node for an instance of an anonymous clone
+ *
+ * Return a child instance of the specified anonymous clone, in order of
+ * preference: (1) the instance running on the specified node, if any;
+ * (2) an inactive instance (i.e. within the total of clone-max instances);
+ * (3) a newly created orphan (i.e. clone-max instances are already active).
+ *
+ * \param[in,out] data_set Cluster information
+ * \param[in] node Node on which to check for instance
+ * \param[in,out] parent Clone to check
+ * \param[in] rsc_id Name of cloned resource in history (without instance)
+ */
+static pe_resource_t *
+find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
+ pe_resource_t *parent, const char *rsc_id)
+{
+ GList *rIter = NULL;
+ pe_resource_t *rsc = NULL;
+ pe_resource_t *inactive_instance = NULL;
+ gboolean skip_inactive = FALSE;
+
+ CRM_ASSERT(parent != NULL);
+ CRM_ASSERT(pe_rsc_is_clone(parent));
+ CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
+
+ // Check for active (or partially active, for cloned groups) instance
+ pe_rsc_trace(parent, "Looking for %s on %s in %s",
+ rsc_id, pe__node_name(node), parent->id);
+ for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
+ GList *locations = NULL;
+ pe_resource_t *child = rIter->data;
+
+ /* Check whether this instance is already known to be active or pending
+ * anywhere, at this stage of unpacking. Because this function is called
+ * for a resource before the resource's individual operation history
+ * entries are unpacked, locations will generally not contain the
+ * desired node.
+ *
+ * However, there are three exceptions:
+ * (1) when child is a cloned group and we have already unpacked the
+ * history of another member of the group on the same node;
+ * (2) when we've already unpacked the history of another numbered
+ * instance on the same node (which can happen if globally-unique
+ * was flipped from true to false); and
+ * (3) when we re-run calculations on the same data set as part of a
+ * simulation.
+ */
+ child->fns->location(child, &locations, 2);
+ if (locations) {
+ /* We should never associate the same numbered anonymous clone
+ * instance with multiple nodes, and clone instances can't migrate,
+ * so there must be only one location, regardless of history.
+ */
+ CRM_LOG_ASSERT(locations->next == NULL);
+
+ if (((pe_node_t *)locations->data)->details == node->details) {
+ /* This child instance is active on the requested node, so check
+ * for a corresponding configured resource. We use find_rsc()
+ * instead of child because child may be a cloned group, and we
+ * need the particular member corresponding to rsc_id.
+ *
+ * If the history entry is orphaned, rsc will be NULL.
+ */
+ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
+ if (rsc) {
+ /* If there are multiple instance history entries for an
+ * anonymous clone in a single node's history (which can
+ * happen if globally-unique is switched from true to
+ * false), we want to consider the instances beyond the
+ * first as orphans, even if there are inactive instance
+ * numbers available.
+ */
+ if (rsc->running_on) {
+ crm_notice("Active (now-)anonymous clone %s has "
+ "multiple (orphan) instance histories on %s",
+ parent->id, pe__node_name(node));
+ skip_inactive = TRUE;
+ rsc = NULL;
+ } else {
+ pe_rsc_trace(parent, "Resource %s, active", rsc->id);
+ }
+ }
+ }
+ g_list_free(locations);
+
+ } else {
+ pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
+ if (!skip_inactive && !inactive_instance
+ && !pcmk_is_set(child->flags, pe_rsc_block)) {
+ // Remember one inactive instance in case we don't find active
+ inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
+ pe_find_clone);
+
+ /* ... but don't use it if it was already associated with a
+ * pending action on another node
+ */
+ if (inactive_instance && inactive_instance->pending_node
+ && (inactive_instance->pending_node->details != node->details)) {
+ inactive_instance = NULL;
+ }
+ }
+ }
+ }
+
+ if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
+ pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id);
+ rsc = inactive_instance;
+ }
+
+ /* If the resource has "requires" set to "quorum" or "nothing", and we don't
+ * have a clone instance for every node, we don't want to consume a valid
+ * instance number for unclean nodes. Such instances may appear to be active
+ * according to the history, but should be considered inactive, so we can
+ * start an instance elsewhere. Treat such instances as orphans.
+ *
+ * An exception is instances running on guest nodes -- since guest node
+ * "fencing" is actually just a resource stop, requires shouldn't apply.
+ *
+ * @TODO Ideally, we'd use an inactive instance number if it is not needed
+ * for any clean instances. However, we don't know that at this point.
+ */
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ && (!node->details->online || node->details->unclean)
+ && !pe__is_guest_node(node)
+ && !pe__is_universal_clone(parent, data_set)) {
+
+ rsc = NULL;
+ }
+
+ if (rsc == NULL) {
+ rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
+ pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
+ }
+ return rsc;
+}
+
+static pe_resource_t *
+unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
+ const char *rsc_id)
+{
+ pe_resource_t *rsc = NULL;
+ pe_resource_t *parent = NULL;
+
+ crm_trace("looking for %s", rsc_id);
+ rsc = pe_find_resource(data_set->resources, rsc_id);
+
+ if (rsc == NULL) {
+ /* If we didn't find the resource by its name in the operation history,
+ * check it again as a clone instance. Even when clone-max=0, we create
+ * a single :0 orphan to match against here.
+ */
+ char *clone0_id = clone_zero(rsc_id);
+ pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
+
+ if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
+ rsc = clone0;
+ parent = uber_parent(clone0);
+ crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
+ } else {
+ crm_trace("%s is not known as %s either (orphan)",
+ rsc_id, clone0_id);
+ }
+ free(clone0_id);
+
+ } else if (rsc->variant > pe_native) {
+ crm_trace("Resource history for %s is orphaned because it is no longer primitive",
+ rsc_id);
+ return NULL;
+
+ } else {
+ parent = uber_parent(rsc);
+ }
+
+ if (pe_rsc_is_anon_clone(parent)) {
+
+ if (pe_rsc_is_bundled(parent)) {
+ rsc = pe__find_bundle_replica(parent->parent, node);
+ } else {
+ char *base = clone_strip(rsc_id);
+
+ rsc = find_anonymous_clone(data_set, node, parent, base);
+ free(base);
+ CRM_ASSERT(rsc != NULL);
+ }
+ }
+
+ if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei)
+ && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) {
+
+ pcmk__str_update(&rsc->clone_name, rsc_id);
+ pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
+ rsc_id, pe__node_name(node), rsc->id,
+ (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
+ }
+ return rsc;
+}
+
+static pe_resource_t *
+process_orphan_resource(const xmlNode *rsc_entry, const pe_node_t *node,
+ pe_working_set_t *data_set)
+{
+ pe_resource_t *rsc = NULL;
+ const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
+
+ crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node));
+ rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
+ if (rsc == NULL) {
+ return NULL;
+ }
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
+ pe__clear_resource_flags(rsc, pe_rsc_managed);
+
+ } else {
+ CRM_CHECK(rsc != NULL, return NULL);
+ pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
+ resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
+ }
+ return rsc;
+}
+
+static void
+process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
+ enum action_fail_response on_fail)
+{
+ pe_node_t *tmpnode = NULL;
+ char *reason = NULL;
+ enum action_fail_response save_on_fail = action_fail_ignore;
+
+ CRM_ASSERT(rsc);
+ pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
+ rsc->id, role2text(rsc->role), pe__node_name(node),
+ fail2text(on_fail));
+
+ /* process current state */
+ if (rsc->role != RSC_ROLE_UNKNOWN) {
+ pe_resource_t *iter = rsc;
+
+ while (iter) {
+ if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
+ pe_node_t *n = pe__copy_node(node);
+
+ pe_rsc_trace(rsc, "%s%s%s known on %s",
+ rsc->id,
+ ((rsc->clone_name == NULL)? "" : " also known as "),
+ ((rsc->clone_name == NULL)? "" : rsc->clone_name),
+ pe__node_name(n));
+ g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
+ }
+ if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
+ break;
+ }
+ iter = iter->parent;
+ }
+ }
+
+ /* If a managed resource is believed to be running, but node is down ... */
+ if (rsc->role > RSC_ROLE_STOPPED
+ && node->details->online == FALSE
+ && node->details->maintenance == FALSE
+ && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+
+ gboolean should_fence = FALSE;
+
+ /* If this is a guest node, fence it (regardless of whether fencing is
+ * enabled, because guest node fencing is done by recovery of the
+ * container resource rather than by the fencer). Mark the resource
+ * we're processing as failed. When the guest comes back up, its
+ * operation history in the CIB will be cleared, freeing the affected
+ * resource to run again once we are sure we know its state.
+ */
+ if (pe__is_guest_node(node)) {
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ should_fence = TRUE;
+
+ } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ if (pe__is_remote_node(node) && node->details->remote_rsc
+ && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
+
+ /* Setting unseen means that fencing of the remote node will
+ * occur only if the connection resource is not going to start
+ * somewhere. This allows connection resources on a failed
+ * cluster node to move to another node without requiring the
+ * remote nodes to be fenced as well.
+ */
+ node->details->unseen = TRUE;
+ reason = crm_strdup_printf("%s is active there (fencing will be"
+ " revoked if remote connection can "
+ "be re-established elsewhere)",
+ rsc->id);
+ }
+ should_fence = TRUE;
+ }
+
+ if (should_fence) {
+ if (reason == NULL) {
+ reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
+ }
+ pe_fence_node(rsc->cluster, node, reason, FALSE);
+ }
+ free(reason);
+ }
+
+ /* In order to calculate priority_fencing_delay correctly, save the
+ * failure information and pass it to native_add_running().
+ */
+ save_on_fail = on_fail;
+
+ if (node->details->unclean) {
+ /* No extra processing needed. This also allows resources to be
+ * started again after a node is shot.
+ */
+ on_fail = action_fail_ignore;
+ }
+
+ switch (on_fail) {
+ case action_fail_ignore:
+ /* nothing to do */
+ break;
+
+ case action_fail_demote:
+ pe__set_resource_flags(rsc, pe_rsc_failed);
+ demote_action(rsc, node, FALSE);
+ break;
+
+ case action_fail_fence:
+ /* treat it as if it is still running
+ * but also mark the node as unclean
+ */
+ reason = crm_strdup_printf("%s failed there", rsc->id);
+ pe_fence_node(rsc->cluster, node, reason, FALSE);
+ free(reason);
+ break;
+
+ case action_fail_standby:
+ node->details->standby = TRUE;
+ node->details->standby_onfail = TRUE;
+ break;
+
+ case action_fail_block:
+ /* is_managed == FALSE will prevent any
+ * actions from being sent for the resource
+ */
+ pe__clear_resource_flags(rsc, pe_rsc_managed);
+ pe__set_resource_flags(rsc, pe_rsc_block);
+ break;
+
+ case action_fail_migrate:
+ /* make sure it comes up somewhere else
+ * or not at all
+ */
+ resource_location(rsc, node, -INFINITY, "__action_migration_auto__",
+ rsc->cluster);
+ break;
+
+ case action_fail_stop:
+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
+ break;
+
+ case action_fail_recover:
+ if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ stop_action(rsc, node, FALSE);
+ }
+ break;
+
+ case action_fail_restart_container:
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ if (rsc->container && pe_rsc_is_bundled(rsc)) {
+ /* A bundle's remote connection can run on a different node than
+ * the bundle's container. We don't necessarily know where the
+ * container is running yet, so remember it and add a stop
+ * action for it later.
+ */
+ rsc->cluster->stop_needed =
+ g_list_prepend(rsc->cluster->stop_needed, rsc->container);
+ } else if (rsc->container) {
+ stop_action(rsc->container, node, FALSE);
+ } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ stop_action(rsc, node, FALSE);
+ }
+ break;
+
+ case action_fail_reset_remote:
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ tmpnode = NULL;
+ if (rsc->is_remote_node) {
+ tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
+ }
+ if (tmpnode &&
+ pe__is_remote_node(tmpnode) &&
+ tmpnode->details->remote_was_fenced == 0) {
+
+ /* The remote connection resource failed in a way that
+ * should result in fencing the remote node.
+ */
+ pe_fence_node(rsc->cluster, tmpnode,
+ "remote connection is unrecoverable", FALSE);
+ }
+ }
+
+ /* Require the stop action regardless of whether fencing is occurring. */
+ if (rsc->role > RSC_ROLE_STOPPED) {
+ stop_action(rsc, node, FALSE);
+ }
+
+ /* if reconnect delay is in use, prevent the connection from exiting the
+ * "STOPPED" role until the failure is cleared by the delay timeout. */
+ if (rsc->remote_reconnect_ms) {
+ pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
+ }
+ break;
+ }
+
+ /* Ensure a remote node connection failure forces an unclean remote node
+ * to be fenced. By setting unseen = FALSE, the remote node failure will
+ * result in a fencing operation regardless of whether we're going to
+ * attempt to reconnect to the remote node in this transition.
+ */
+ if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
+ tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
+ if (tmpnode && tmpnode->details->unclean) {
+ tmpnode->details->unseen = FALSE;
+ }
+ }
+
+ if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk__config_warn("Detected active orphan %s running on %s",
+ rsc->id, pe__node_name(node));
+ } else {
+ pcmk__config_warn("Resource '%s' must be stopped manually on "
+ "%s because cluster is configured not to "
+ "stop active orphans",
+ rsc->id, pe__node_name(node));
+ }
+ }
+
+ native_add_running(rsc, node, rsc->cluster,
+ (save_on_fail != action_fail_ignore));
+ switch (on_fail) {
+ case action_fail_ignore:
+ break;
+ case action_fail_demote:
+ case action_fail_block:
+ pe__set_resource_flags(rsc, pe_rsc_failed);
+ break;
+ default:
+ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ break;
+ }
+
+ } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
+ /* Only do this for older status sections that included instance numbers.
+ * Otherwise, stopped instances will appear as orphans.
+ */
+ pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
+ free(rsc->clone_name);
+ rsc->clone_name = NULL;
+
+ } else {
+ GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
+ FALSE);
+ GList *gIter = possible_matches;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_action_t *stop = (pe_action_t *) gIter->data;
+
+ pe__set_action_flags(stop, pe_action_optional);
+ }
+
+ g_list_free(possible_matches);
+ }
+
+ /* A successful stop after migrate_to on the migration source doesn't make
+ * the partially migrated resource stopped on the migration target.
+ */
+ if (rsc->role == RSC_ROLE_STOPPED
+ && rsc->partial_migration_source
+ && rsc->partial_migration_source->details == node->details
+ && rsc->partial_migration_target
+ && rsc->running_on) {
+
+ rsc->role = RSC_ROLE_STARTED;
+ }
+}
+
+/* create active recurring operations as optional */
+static void
+process_recurring(pe_node_t * node, pe_resource_t * rsc,
+ int start_index, int stop_index,
+ GList *sorted_op_list, pe_working_set_t * data_set)
+{
+ int counter = -1;
+ const char *task = NULL;
+ const char *status = NULL;
+ GList *gIter = sorted_op_list;
+
+ CRM_ASSERT(rsc);
+ pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ xmlNode *rsc_op = (xmlNode *) gIter->data;
+
+ guint interval_ms = 0;
+ char *key = NULL;
+ const char *id = ID(rsc_op);
+
+ counter++;
+
+ if (node->details->online == FALSE) {
+ pe_rsc_trace(rsc, "Skipping %s on %s: node is offline",
+ rsc->id, pe__node_name(node));
+ break;
+
+ /* Need to check if there's a monitor for role="Stopped" */
+ } else if (start_index < stop_index && counter <= stop_index) {
+ pe_rsc_trace(rsc, "Skipping %s on %s: resource is not active",
+ id, pe__node_name(node));
+ continue;
+
+ } else if (counter < start_index) {
+ pe_rsc_trace(rsc, "Skipping %s on %s: old %d",
+ id, pe__node_name(node), counter);
+ continue;
+ }
+
+ crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
+ if (interval_ms == 0) {
+ pe_rsc_trace(rsc, "Skipping %s on %s: non-recurring",
+ id, pe__node_name(node));
+ continue;
+ }
+
+ status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
+ if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
+ pe_rsc_trace(rsc, "Skipping %s on %s: status",
+ id, pe__node_name(node));
+ continue;
+ }
+ task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
+ /* create the action */
+ key = pcmk__op_key(rsc->id, task, interval_ms);
+ pe_rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node));
+ custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
+ }
+}
+
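+/*!
+ * \brief Find start and stop indices in a resource's sorted operation history
+ *
+ * Roughly: the stop index is the last completed stop, and the start index is
+ * the last start or migrate_from, falling back to the last promote or demote,
+ * then to the first probe or monitor after that stop which found the resource
+ * running (rc 0 or 8).
+ *
+ * \param[in]  sorted_op_list  Operation history entries, sorted by call ID
+ * \param[out] start_index     Where to store index of last effective start
+ * \param[out] stop_index      Where to store index of last successful stop
+ */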
+void
+calculate_active_ops(const GList *sorted_op_list, int *start_index,
+ int *stop_index)
+{
+ int counter = -1;
+ int implied_monitor_start = -1;
+ int implied_clone_start = -1;
+ const char *task = NULL;
+ const char *status = NULL;
+
+ *stop_index = -1;
+ *start_index = -1;
+
+ for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
+ const xmlNode *rsc_op = (const xmlNode *) iter->data;
+
+ counter++;
+
+ task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
+ status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
+
+ if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
+ && pcmk__str_eq(status, "0", pcmk__str_casei)) {
+ *stop_index = counter;
+
+ } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
+ *start_index = counter;
+
+ } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
+
+ if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
+ implied_monitor_start = counter;
+ }
+ } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
+ implied_clone_start = counter;
+ }
+ }
+
+ if (*start_index == -1) {
+ if (implied_clone_start != -1) {
+ *start_index = implied_clone_start;
+ } else if (implied_monitor_start != -1) {
+ *start_index = implied_monitor_start;
+ }
+ }
+}
+
+// If resource history entry has shutdown lock, remember lock node and time
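+// A locked entry looks roughly like this (hypothetical ID and time):
+//   <lrm_resource id="myrsc" ... shutdown-lock="1649104800">
+// where the value is the epoch second at which the node shut down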
+static void
+unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
+ const pe_node_t *node, pe_working_set_t *data_set)
+{
+ time_t lock_time = 0; // When lock started (i.e. node shutdown time)
+
+ if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
+ &lock_time) == pcmk_ok) && (lock_time != 0)) {
+
+ if ((data_set->shutdown_lock > 0)
+ && (get_effective_time(data_set)
+ > (lock_time + data_set->shutdown_lock))) {
+ pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
+ rsc->id, pe__node_name(node));
+ pe__clear_resource_history(rsc, node, data_set);
+ } else {
+ /* @COMPAT I don't like breaking const signatures, but
+ * rsc->lock_node should really be const -- we just can't change it
+ * until the next API compatibility break.
+ */
+ rsc->lock_node = (pe_node_t *) node;
+ rsc->lock_time = lock_time;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack one lrm_resource entry from a node's CIB status
+ *
+ * \param[in,out] node Node whose status is being unpacked
+ * \param[in] lrm_resource lrm_resource XML being unpacked
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Resource corresponding to the entry, or NULL if no operation history
+ */
+static pe_resource_t *
+unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
+ pe_working_set_t *data_set)
+{
+ GList *gIter = NULL;
+ int stop_index = -1;
+ int start_index = -1;
+ enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
+
+ const char *rsc_id = ID(lrm_resource);
+
+ pe_resource_t *rsc = NULL;
+ GList *op_list = NULL;
+ GList *sorted_op_list = NULL;
+
+ xmlNode *rsc_op = NULL;
+ xmlNode *last_failure = NULL;
+
+ enum action_fail_response on_fail = action_fail_ignore;
+ enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
+
+ if (rsc_id == NULL) {
+ crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
+ " entry without id");
+ return NULL;
+ }
+ crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s",
+ rsc_id, pe__node_name(node));
+
+ // Build a list of individual lrm_rsc_op entries, so we can sort them
+ for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
+ rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
+
+ op_list = g_list_prepend(op_list, rsc_op);
+ }
+
+ if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (op_list == NULL) {
+ // If there are no operations, there is nothing to do
+ return NULL;
+ }
+ }
+
+ /* find the resource */
+ rsc = unpack_find_resource(data_set, node, rsc_id);
+ if (rsc == NULL) {
+ if (op_list == NULL) {
+ // If there are no operations, there is nothing to do
+ return NULL;
+ } else {
+ rsc = process_orphan_resource(lrm_resource, node, data_set);
+ }
+ }
+ CRM_ASSERT(rsc != NULL);
+
+ // Check whether the resource is "shutdown-locked" to this node
+ if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
+ }
+
+ /* process operations */
+ saved_role = rsc->role;
+ rsc->role = RSC_ROLE_UNKNOWN;
+ sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
+
+ for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
+ xmlNode *rsc_op = (xmlNode *) gIter->data;
+
+ unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
+ }
+
+ /* create active recurring operations as optional */
+ calculate_active_ops(sorted_op_list, &start_index, &stop_index);
+ process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
+
+ /* Free only the list; the XML entries it points to are owned by the CIB */
+ g_list_free(sorted_op_list);
+
+ process_rsc_state(rsc, node, on_fail);
+
+ if (get_target_role(rsc, &req_role)) {
+ if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
+ pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
+
+ } else if (req_role > rsc->next_role) {
+ pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s"
+ " with requested next role %s",
+ rsc->id, role2text(rsc->next_role), role2text(req_role));
+ }
+ }
+
+ if (saved_role > rsc->role) {
+ rsc->role = saved_role;
+ }
+
+ return rsc;
+}
+
+static void
+handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
+ pe_working_set_t *data_set)
+{
+ for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list);
+ rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
+
+ pe_resource_t *rsc;
+ pe_resource_t *container;
+ const char *rsc_id;
+ const char *container_id;
+
+ if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) {
+ continue;
+ }
+
+ container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER);
+ rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
+ if (container_id == NULL || rsc_id == NULL) {
+ continue;
+ }
+
+ container = pe_find_resource(data_set->resources, container_id);
+ if (container == NULL) {
+ continue;
+ }
+
+ rsc = pe_find_resource(data_set->resources, rsc_id);
+ if (rsc == NULL ||
+ !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
+ rsc->container != NULL) {
+ continue;
+ }
+
+ pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
+ rsc->id, container_id);
+ rsc->container = container;
+ container->fillers = g_list_append(container->fillers, rsc);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack one node's lrm status section
+ *
+ * \param[in,out] node Node whose status is being unpacked
+ * \param[in] xml CIB node state XML
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
+{
+ bool found_orphaned_container_filler = false;
+
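+ /* The XML being unpacked looks roughly like this (hypothetical names):
+ *
+ *   <node_state uname="node1" ...>
+ *     <lrm id="...">
+ *       <lrm_resources>
+ *         <lrm_resource id="myrsc" ...>
+ *           <lrm_rsc_op id="myrsc_last_0" operation="start" .../>
+ */
+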
+ // Drill down to lrm_resources section
+ xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE);
+ if (xml == NULL) {
+ return;
+ }
+ xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE);
+ if (xml == NULL) {
+ return;
+ }
+
+ // Unpack each lrm_resource entry
+ for (const xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
+ rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
+
+ pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
+
+ if ((rsc != NULL)
+ && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
+ found_orphaned_container_filler = true;
+ }
+ }
+
+ /* Now that all resource state has been unpacked for this node, map any
+ * orphaned container fillers to their container resource.
+ */
+ if (found_orphaned_container_filler) {
+ handle_orphaned_container_fillers(xml, data_set);
+ }
+}
+
+static void
+set_active(pe_resource_t * rsc)
+{
+ const pe_resource_t *top = pe__const_top_resource(rsc, false);
+
+ if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
+ rsc->role = RSC_ROLE_UNPROMOTED;
+ } else {
+ rsc->role = RSC_ROLE_STARTED;
+ }
+}
+
+static void
+set_node_score(gpointer key, gpointer value, gpointer user_data)
+{
+ pe_node_t *node = value;
+ int *score = user_data;
+
+ node->weight = *score;
+}
+
+#define XPATH_NODE_STATE "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \
+ "/" XML_CIB_TAG_STATE
+#define SUB_XPATH_LRM_RESOURCE "/" XML_CIB_TAG_LRM \
+ "/" XML_LRM_TAG_RESOURCES \
+ "/" XML_LRM_TAG_RESOURCE
+#define SUB_XPATH_LRM_RSC_OP "/" XML_LRM_TAG_RSC_OP
+
+static xmlNode *
+find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
+ int target_rc, pe_working_set_t *data_set)
+{
+ GString *xpath = NULL;
+ xmlNode *xml = NULL;
+
+ CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
+ return NULL);
+
+ xpath = g_string_sized_new(256);
+ pcmk__g_strcat(xpath,
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", resource, "']"
+ SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_TASK "='", op, "'",
+ NULL);
+
+ /* Need to check against transition_magic too? */
+ if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATE) == 0)) {
+ pcmk__g_strcat(xpath,
+ " and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']",
+ NULL);
+
+ } else if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATED) == 0)) {
+ pcmk__g_strcat(xpath,
+ " and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']",
+ NULL);
+ } else {
+ g_string_append_c(xpath, ']');
+ }
+
+ xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ LOG_DEBUG);
+ g_string_free(xpath, TRUE);
+
+ if (xml && target_rc >= 0) {
+ int rc = PCMK_OCF_UNKNOWN_ERROR;
+ int status = PCMK_EXEC_ERROR;
+
+ crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc);
+ crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status);
+ if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
+ return NULL;
+ }
+ }
+ return xml;
+}
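+/* For example, find_lrm_op("myrsc", "monitor", "node1", NULL, -1, data_set)
+ * (hypothetical names) evaluates an XPath roughly equivalent to
+ * /cib/status/node_state[@uname='node1']/lrm/lrm_resources
+ *     /lrm_resource[@id='myrsc']/lrm_rsc_op[@operation='monitor']
+ * with a negative target_rc skipping the result-code check.
+ */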
+
+static xmlNode *
+find_lrm_resource(const char *rsc_id, const char *node_name,
+ pe_working_set_t *data_set)
+{
+ GString *xpath = NULL;
+ xmlNode *xml = NULL;
+
+ CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
+
+ xpath = g_string_sized_new(256);
+ pcmk__g_strcat(xpath,
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']",
+ NULL);
+
+ xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ LOG_DEBUG);
+
+ g_string_free(xpath, TRUE);
+ return xml;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a resource has no completed action history on a node
+ *
+ * \param[in,out] rsc Resource to check
+ * \param[in] node_name Node to check
+ *
+ * \return true if \p rsc is unknown on \p node_name, otherwise false
+ */
+static bool
+unknown_on_node(pe_resource_t *rsc, const char *node_name)
+{
+ bool result = false;
+ xmlXPathObjectPtr search;
+ GString *xpath = g_string_sized_new(256);
+
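+ /* The rc filter below skips pending results: 193 is PCMK_OCF_UNKNOWN,
+ * which is recorded for actions still in flight (an assumption based on
+ * Pacemaker's usual exit-code numbering)
+ */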
+ pcmk__g_strcat(xpath,
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc->id, "']"
+ SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_RC "!='193']",
+ NULL);
+ search = xpath_search(rsc->cluster->input, (const char *) xpath->str);
+ result = (numXpathResults(search) == 0);
+ freeXpathObject(search);
+ g_string_free(xpath, TRUE);
+ return result;
+}
+
+/*!
+ * \brief Check whether a probe/monitor indicating the resource was not running
+ * on a node happened after some event
+ *
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] xml_op Event that monitor is being compared to
+ * \param[in] same_node Whether the operations are on the same node
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return true if such a monitor happened after event, false otherwise
+ */
+static bool
+monitor_not_running_after(const char *rsc_id, const char *node_name,
+ const xmlNode *xml_op, bool same_node,
+ pe_working_set_t *data_set)
+{
+ /* Any probe/monitor operation on the node indicating it was not running
+ * there
+ */
+ xmlNode *monitor = find_lrm_op(rsc_id, CRMD_ACTION_STATUS, node_name,
+ NULL, PCMK_OCF_NOT_RUNNING, data_set);
+
+ return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
+}
+
+/*!
+ * \brief Check whether any non-monitor operation on a node happened after some
+ * event
+ *
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] xml_op Event that non-monitor is being compared to
+ * \param[in] same_node Whether the operations are on the same node
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return true if such an operation happened after the event, false otherwise
+ */
+static bool
+non_monitor_after(const char *rsc_id, const char *node_name,
+ const xmlNode *xml_op, bool same_node,
+ pe_working_set_t *data_set)
+{
+ xmlNode *lrm_resource = NULL;
+
+ lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
+ if (lrm_resource == NULL) {
+ return false;
+ }
+
+ for (xmlNode *op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
+ op != NULL; op = crm_next_same_xml(op)) {
+ const char * task = NULL;
+
+ if (op == xml_op) {
+ continue;
+ }
+
+ task = crm_element_value(op, XML_LRM_ATTR_TASK);
+
+ if (pcmk__str_any_of(task, CRMD_ACTION_START, CRMD_ACTION_STOP,
+ CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)
+ && pe__is_newer_op(op, xml_op, same_node) > 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*!
+ * \brief Check whether the resource has newer state on a node after a migration
+ * attempt
+ *
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] migrate_to Any migrate_to event that is being compared to
+ * \param[in] migrate_from Any migrate_from event that is being compared to
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return true if the resource has newer state on the node after the
+ * migration events, false otherwise
+ */
+static bool
+newer_state_after_migrate(const char *rsc_id, const char *node_name,
+ const xmlNode *migrate_to,
+ const xmlNode *migrate_from,
+ pe_working_set_t *data_set)
+{
+ const xmlNode *xml_op = migrate_to;
+ const char *source = NULL;
+ const char *target = NULL;
+ bool same_node = false;
+
+ if (migrate_from) {
+ xml_op = migrate_from;
+ }
+
+ source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
+ target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
+
+ /* It's preferable to compare to the migration event on the same node, if
+ * one exists, since call IDs are more reliable.
+ */
+ if (pcmk__str_eq(node_name, target, pcmk__str_casei)) {
+ if (migrate_from) {
+ xml_op = migrate_from;
+ same_node = true;
+
+ } else {
+ xml_op = migrate_to;
+ }
+
+ } else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) {
+ if (migrate_to) {
+ xml_op = migrate_to;
+ same_node = true;
+
+ } else {
+ xml_op = migrate_from;
+ }
+ }
+
+ /* If there's any newer non-monitor operation on the node, or any newer
+ * probe/monitor operation on the node indicating it was not running there,
+ * the migration events potentially no longer matter for the node.
+ */
+ return non_monitor_after(rsc_id, node_name, xml_op, same_node, data_set)
+ || monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
+ data_set);
+}
+
+/*!
+ * \internal
+ * \brief Parse migration source and target node names from history entry
+ *
+ * \param[in] entry Resource history entry for a migration action
+ * \param[in] source_node If not NULL, source must match this node
+ * \param[in] target_node If not NULL, target must match this node
+ * \param[out] source_name Where to store migration source node name
+ * \param[out] target_name Where to store migration target node name
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
+ const pe_node_t *target_node,
+ const char **source_name, const char **target_name)
+{
+ *source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE);
+ *target_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_TARGET);
+ if ((*source_name == NULL) || (*target_name == NULL)) {
+ crm_err("Ignoring resource history entry %s without "
+ XML_LRM_ATTR_MIGRATE_SOURCE " and " XML_LRM_ATTR_MIGRATE_TARGET,
+ ID(entry));
+ return pcmk_rc_unpack_error;
+ }
+
+ if ((source_node != NULL)
+ && !pcmk__str_eq(*source_name, source_node->details->uname,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ crm_err("Ignoring resource history entry %s because "
+ XML_LRM_ATTR_MIGRATE_SOURCE "='%s' does not match %s",
+ ID(entry), *source_name, pe__node_name(source_node));
+ return pcmk_rc_unpack_error;
+ }
+
+ if ((target_node != NULL)
+ && !pcmk__str_eq(*target_name, target_node->details->uname,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ crm_err("Ignoring resource history entry %s because "
+ XML_LRM_ATTR_MIGRATE_TARGET "='%s' does not match %s",
+ ID(entry), *target_name, pe__node_name(target_node));
+ return pcmk_rc_unpack_error;
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Add a migration source to a resource's list of dangling migrations
+ *
+ * If the migrate_to and migrate_from actions in a live migration both
+ * succeeded, but there is no stop on the source, the migration is considered
+ * "dangling." Add the source to the resource's dangling migration list, which
+ * will be used to schedule a stop on the source without affecting the target.
+ *
+ * \param[in,out] rsc Resource involved in migration
+ * \param[in] node Migration source
+ */
+static void
+add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
+{
+ pe_rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
+ rsc->id, pe__node_name(node));
+ rsc->role = RSC_ROLE_STOPPED;
+ rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
+ (gpointer) node);
+}
+
+/*!
+ * \internal
+ * \brief Update resource role etc. after a successful migrate_to action
+ *
+ * \param[in,out] history Parsed action result history
+ */
+static void
+unpack_migrate_to_success(struct action_history *history)
+{
+ /* A complete migration sequence is:
+ * 1. migrate_to on source node (which succeeded if we get to this function)
+ * 2. migrate_from on target node
+ * 3. stop on source node
+ *
+ * If no migrate_from has happened, the migration is considered to be
+ * "partial". If the migrate_from succeeded but no stop has happened, the
+ * migration is considered to be "dangling".
+ *
+ * If a successful migrate_to and stop have happened on the source node, we
+ * still need to check for a partial migration, due to scenarios (easier to
+ * produce with batch-limit=1) like:
+ *
+ * - A resource is migrating from node1 to node2, and a migrate_to is
+ * initiated for it on node1.
+ *
+ * - node2 goes into standby mode while the migrate_to is pending, which
+ * aborts the transition.
+ *
+ * - Upon completion of the migrate_to, a new transition schedules a stop
+ * on both nodes and a start on node1.
+ *
+ * - If the new transition is aborted for any reason while the resource is
+ * stopping on node1, the transition after that stop completes will see
+ * the migrate_to and stop on the source, but it's still a partial
+ * migration, and the resource must be stopped on node2 because it is
+ * potentially active there due to the migrate_to.
+ *
+ * We also need to take into account that either node's history may be
+ * cleared at any point in the migration process.
+ */
+ int from_rc = PCMK_OCF_OK;
+ int from_status = PCMK_EXEC_PENDING;
+ pe_node_t *target_node = NULL;
+ xmlNode *migrate_from = NULL;
+ const char *source = NULL;
+ const char *target = NULL;
+ bool source_newer_op = false;
+ bool target_newer_state = false;
+ bool active_on_target = false;
+
+ // Get source and target node names from XML
+ if (get_migration_node_names(history->xml, history->node, NULL, &source,
+ &target) != pcmk_rc_ok) {
+ return;
+ }
+
+ // Check for newer state on the source
+ source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
+ true, history->rsc->cluster);
+
+ // Check for a migrate_from action from this source on the target
+ migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED, target,
+ source, -1, history->rsc->cluster);
+ if (migrate_from != NULL) {
+ if (source_newer_op) {
+ /* There's a newer non-monitor operation on the source and a
+ * migrate_from on the target, so this migrate_to is irrelevant to
+ * the resource's state.
+ */
+ return;
+ }
+ crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
+ crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS,
+ &from_status);
+ }
+
+ /* If the resource has newer state on both the source and target after the
+ * migration events, this migrate_to is irrelevant to the resource's state.
+ */
+ target_newer_state = newer_state_after_migrate(history->rsc->id, target,
+ history->xml, migrate_from,
+ history->rsc->cluster);
+ if (source_newer_op && target_newer_state) {
+ return;
+ }
+
+ /* Check for dangling migration (migrate_from succeeded but stop not done).
+ * We know there's no stop because we already returned if the target has a
+ * migrate_from and the source has any newer non-monitor operation.
+ */
+ if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
+ add_dangling_migration(history->rsc, history->node);
+ return;
+ }
+
+ /* Without newer state, this migrate_to implies the resource is active.
+ * (Clones are not allowed to migrate, so role can't be promoted.)
+ */
+ history->rsc->role = RSC_ROLE_STARTED;
+
+ target_node = pe_find_node(history->rsc->cluster->nodes, target);
+ active_on_target = !target_newer_state && (target_node != NULL)
+ && target_node->details->online;
+
+ if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
+ if (active_on_target) {
+ native_add_running(history->rsc, target_node, history->rsc->cluster,
+ TRUE);
+ } else {
+ // Mark resource as failed, require recovery, and prevent migration
+ pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ }
+ return;
+ }
+
+ // The migrate_from is pending, complete but erased, or to be scheduled
+
+ /* If there is no history at all for the resource on an online target, then
+ * it was likely cleaned. Just return, and we'll schedule a probe. Once we
+ * have the probe result, it will be reflected in target_newer_state.
+ */
+ if ((target_node != NULL) && target_node->details->online
+ && unknown_on_node(history->rsc, target)) {
+ return;
+ }
+
+ if (active_on_target) {
+ pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
+
+ native_add_running(history->rsc, target_node, history->rsc->cluster,
+ FALSE);
+ if ((source_node != NULL) && source_node->details->online) {
+ /* This is a partial migration: the migrate_to completed
+ * successfully on the source, but the migrate_from has not
+ * completed. Remember the source and target; if the newly
+ * chosen target remains the same when we schedule actions
+ * later, we may continue with the migration.
+ */
+ history->rsc->partial_migration_target = target_node;
+ history->rsc->partial_migration_source = source_node;
+ }
+
+ } else if (!source_newer_op) {
+ // Mark resource as failed, require recovery, and prevent migration
+ pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Update resource role etc. after a failed migrate_to action
+ *
+ * \param[in,out] history Parsed action result history
+ */
+static void
+unpack_migrate_to_failure(struct action_history *history)
+{
+ xmlNode *target_migrate_from = NULL;
+ const char *source = NULL;
+ const char *target = NULL;
+
+ // Get source and target node names from XML
+ if (get_migration_node_names(history->xml, history->node, NULL, &source,
+ &target) != pcmk_rc_ok) {
+ return;
+ }
+
+ /* If a migration failed, we have to assume the resource is active. Clones
+ * are not allowed to migrate, so role can't be promoted.
+ */
+ history->rsc->role = RSC_ROLE_STARTED;
+
+ // Check for migrate_from on the target
+ target_migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED,
+ target, source, PCMK_OCF_OK,
+ history->rsc->cluster);
+
+ if (/* If the resource state is unknown on the target, it will likely be
+ * probed there.
+ * Don't just consider it running there. We will get back here anyway in
+ * case the probe detects it's running there.
+ */
+ !unknown_on_node(history->rsc, target)
+ /* If the resource has newer state on the target after the migration
+ * events, this migrate_to no longer matters for the target.
+ */
+ && !newer_state_after_migrate(history->rsc->id, target, history->xml,
+ target_migrate_from,
+ history->rsc->cluster)) {
+ /* The resource has no newer state on the target, so assume it's still
+ * active there (if it is up).
+ */
+ pe_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
+ target);
+
+ if (target_node && target_node->details->online) {
+ native_add_running(history->rsc, target_node, history->rsc->cluster,
+ FALSE);
+ }
+
+ } else if (!non_monitor_after(history->rsc->id, source, history->xml, true,
+ history->rsc->cluster)) {
+ /* We know the resource has newer state on the target, but this
+ * migrate_to still matters for the source as long as there's no newer
+ * non-monitor operation there.
+ */
+
+ // Mark node as having dangling migration so we can force a stop later
+ history->rsc->dangling_migrations =
+ g_list_prepend(history->rsc->dangling_migrations,
+ (gpointer) history->node);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Update resource role etc. after a failed migrate_from action
+ *
+ * \param[in,out] history Parsed action result history
+ */
+static void
+unpack_migrate_from_failure(struct action_history *history)
+{
+ xmlNode *source_migrate_to = NULL;
+ const char *source = NULL;
+ const char *target = NULL;
+
+ // Get source and target node names from XML
+ if (get_migration_node_names(history->xml, NULL, history->node, &source,
+ &target) != pcmk_rc_ok) {
+ return;
+ }
+
+ /* If a migration failed, we have to assume the resource is active. Clones
+ * are not allowed to migrate, so role can't be promoted.
+ */
+ history->rsc->role = RSC_ROLE_STARTED;
+
+ // Check for a migrate_to on the source
+ source_migrate_to = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATE,
+ source, target, PCMK_OCF_OK,
+ history->rsc->cluster);
+
+ if (/* If the resource state is unknown on the source, it will likely be
+ * probed there.
+ * Don't just consider it running there. We will get back here anyway in
+ * case the probe detects it's running there.
+ */
+ !unknown_on_node(history->rsc, source)
+ /* If the resource has newer state on the source after the migration
+ * events, this migrate_from no longer matters for the source.
+ */
+ && !newer_state_after_migrate(history->rsc->id, source,
+ source_migrate_to, history->xml,
+ history->rsc->cluster)) {
+ /* The resource has no newer state on the source, so assume it's still
+ * active there (if it is up).
+ */
+ pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
+
+ if (source_node && source_node->details->online) {
+ native_add_running(history->rsc, source_node, history->rsc->cluster,
+ TRUE);
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Add an action to cluster's list of failed actions
+ *
+ * \param[in,out] history Parsed action result history
+ */
+static void
+record_failed_op(struct action_history *history)
+{
+ if (!(history->node->details->online)) {
+ return;
+ }
+
+ for (const xmlNode *xIter = history->rsc->cluster->failed->children;
+ xIter != NULL; xIter = xIter->next) {
+
+ const char *key = pe__xe_history_key(xIter);
+ const char *uname = crm_element_value(xIter, XML_ATTR_UNAME);
+
+ if (pcmk__str_eq(history->key, key, pcmk__str_none)
+ && pcmk__str_eq(uname, history->node->details->uname,
+ pcmk__str_casei)) {
+ crm_trace("Skipping duplicate entry %s on %s",
+ history->key, pe__node_name(history->node));
+ return;
+ }
+ }
+
+ crm_trace("Adding entry for %s on %s to failed action list",
+ history->key, pe__node_name(history->node));
+ crm_xml_add(history->xml, XML_ATTR_UNAME, history->node->details->uname);
+ crm_xml_add(history->xml, XML_LRM_ATTR_RSCID, history->rsc->id);
+ add_node_copy(history->rsc->cluster->failed, history->xml);
+}
+
+static char *
+last_change_str(const xmlNode *xml_op)
+{
+ time_t when;
+ char *result = NULL;
+
+ if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
+ &when) == pcmk_ok) {
+ char *when_s = pcmk__epoch2str(&when, 0);
+ const char *p = strchr(when_s, ' ');
+
+ // Skip day of week to make message shorter
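+ // (e.g. a ctime-like "Wed Apr 13 12:05:33 2022" would become
+ // "Apr 13 12:05:33 2022"; timestamp is hypothetical)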
+ if ((p != NULL) && (*(++p) != '\0')) {
+ result = strdup(p);
+ CRM_ASSERT(result != NULL);
+ }
+ free(when_s);
+ }
+
+ if (result == NULL) {
+ result = strdup("unknown time");
+ CRM_ASSERT(result != NULL);
+ }
+
+ return result;
+}
+
+/*!
+ * \internal
+ * \brief Compare two on-fail values
+ *
+ * \param[in] first One on-fail value to compare
+ * \param[in] second The other on-fail value to compare
+ *
+ * \return A negative number if second is more severe than first, zero if they
+ * are equal, or a positive number if first is more severe than second.
+ * \note This is only needed until the action_fail_response values can be
+ * renumbered at the next API compatibility break.
+ */
+static int
+cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
+{
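+ /* A rough summary of the cases below, from least to most severe:
+ * ignore < demote < recover < reset_remote < restart_container < the
+ * remaining values (compared by enum position). For example,
+ * cmp_on_fail(action_fail_recover, action_fail_demote) > 0.
+ */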
+ switch (first) {
+ case action_fail_demote:
+ switch (second) {
+ case action_fail_ignore:
+ return 1;
+ case action_fail_demote:
+ return 0;
+ default:
+ return -1;
+ }
+ break;
+
+ case action_fail_reset_remote:
+ switch (second) {
+ case action_fail_ignore:
+ case action_fail_demote:
+ case action_fail_recover:
+ return 1;
+ case action_fail_reset_remote:
+ return 0;
+ default:
+ return -1;
+ }
+ break;
+
+ case action_fail_restart_container:
+ switch (second) {
+ case action_fail_ignore:
+ case action_fail_demote:
+ case action_fail_recover:
+ case action_fail_reset_remote:
+ return 1;
+ case action_fail_restart_container:
+ return 0;
+ default:
+ return -1;
+ }
+ break;
+
+ default:
+ break;
+ }
+ switch (second) {
+ case action_fail_demote:
+ return (first == action_fail_ignore)? -1 : 1;
+
+ case action_fail_reset_remote:
+ switch (first) {
+ case action_fail_ignore:
+ case action_fail_demote:
+ case action_fail_recover:
+ return -1;
+ default:
+ return 1;
+ }
+ break;
+
+ case action_fail_restart_container:
+ switch (first) {
+ case action_fail_ignore:
+ case action_fail_demote:
+ case action_fail_recover:
+ case action_fail_reset_remote:
+ return -1;
+ default:
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return first - second;
+}
+
+/*!
+ * \internal
+ * \brief Ban a resource (or its clone if an anonymous instance) from all nodes
+ *
+ * \param[in,out] rsc Resource to ban
+ */
+static void
+ban_from_all_nodes(pe_resource_t *rsc)
+{
+ int score = -INFINITY;
+ pe_resource_t *fail_rsc = rsc;
+
+ if (fail_rsc->parent != NULL) {
+ pe_resource_t *parent = uber_parent(fail_rsc);
+
+ if (pe_rsc_is_anon_clone(parent)) {
+ /* For anonymous clones, if an operation with on-fail=stop fails for
+ * any instance, the entire clone must stop.
+ */
+ fail_rsc = parent;
+ }
+ }
+
+ // Ban the resource from all nodes
+ crm_notice("%s will not be started under current conditions", fail_rsc->id);
+ if (fail_rsc->allowed_nodes != NULL) {
+ g_hash_table_destroy(fail_rsc->allowed_nodes);
+ }
+ fail_rsc->allowed_nodes = pe__node_list2table(rsc->cluster->nodes);
+ g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
+}
+
+/*!
+ * \internal
+ * \brief Update resource role, failure handling, etc., after a failed action
+ *
+ * \param[in,out] history Parsed action result history
+ * \param[out] last_failure Where to store the history XML of this failure
+ * \param[in,out] on_fail What should be done about the result
+ */
+static void
+unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
+ enum action_fail_response *on_fail)
+{
+ bool is_probe = false;
+ pe_action_t *action = NULL;
+ char *last_change_s = NULL;
+
+ *last_failure = history->xml;
+
+ is_probe = pcmk_xe_is_probe(history->xml);
+ last_change_s = last_change_str(history->xml);
+
+ if (!pcmk_is_set(history->rsc->cluster->flags, pe_flag_symmetric_cluster)
+ && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
+ crm_trace("Unexpected result (%s%s%s) was recorded for "
+ "%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
+ services_ocf_exitcode_str(history->exit_status),
+ (pcmk__str_empty(history->exit_reason)? "" : ": "),
+ pcmk__s(history->exit_reason, ""),
+ (is_probe? "probe" : history->task), history->rsc->id,
+ pe__node_name(history->node), last_change_s,
+ history->exit_status, history->id);
+ } else {
+ crm_warn("Unexpected result (%s%s%s) was recorded for "
+ "%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
+ services_ocf_exitcode_str(history->exit_status),
+ (pcmk__str_empty(history->exit_reason)? "" : ": "),
+ pcmk__s(history->exit_reason, ""),
+ (is_probe? "probe" : history->task), history->rsc->id,
+ pe__node_name(history->node), last_change_s,
+ history->exit_status, history->id);
+
+ if (is_probe && (history->exit_status != PCMK_OCF_OK)
+ && (history->exit_status != PCMK_OCF_NOT_RUNNING)
+ && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
+
+ /* A failed (not just unexpected) probe result could mean the user
+ * didn't know that resources will be probed even on nodes where they
+ * can't run.
+ */
+ crm_notice("If it is not possible for %s to run on %s, see "
+ "the resource-discovery option for location constraints",
+ history->rsc->id, pe__node_name(history->node));
+ }
+
+ record_failed_op(history);
+ }
+
+ free(last_change_s);
+
+ action = custom_action(history->rsc, strdup(history->key), history->task,
+ NULL, TRUE, FALSE, history->rsc->cluster);
+ if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
+ pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s (%s)",
+ fail2text(*on_fail), fail2text(action->on_fail),
+ action->uuid, history->key);
+ *on_fail = action->on_fail;
+ }
+
+ if (strcmp(history->task, CRMD_ACTION_STOP) == 0) {
+ resource_location(history->rsc, history->node, -INFINITY,
+ "__stop_fail__", history->rsc->cluster);
+
+ } else if (strcmp(history->task, CRMD_ACTION_MIGRATE) == 0) {
+ unpack_migrate_to_failure(history);
+
+ } else if (strcmp(history->task, CRMD_ACTION_MIGRATED) == 0) {
+ unpack_migrate_from_failure(history);
+
+ } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+
+ } else if (strcmp(history->task, CRMD_ACTION_DEMOTE) == 0) {
+ if (action->on_fail == action_fail_block) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+ pe__set_next_role(history->rsc, RSC_ROLE_STOPPED,
+ "demote with on-fail=block");
+
+ } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
+ history->rsc->role = RSC_ROLE_STOPPED;
+
+ } else {
+ /* Staying in the promoted role would put the scheduler and
+ * controller into a loop. Setting the role to unpromoted is not
+ * dangerous because the resource will be stopped as part of
+ * recovery, and any promotion will be ordered after that stop.
+ */
+ history->rsc->role = RSC_ROLE_UNPROMOTED;
+ }
+ }
+
+ if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
+ /* leave stopped */
+ pe_rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
+ history->rsc->role = RSC_ROLE_STOPPED;
+
+ } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ pe_rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
+ set_active(history->rsc);
+ }
+
+ pe_rsc_trace(history->rsc,
+ "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
+ history->rsc->id, role2text(history->rsc->role),
+ pcmk__btoa(history->node->details->unclean),
+ fail2text(action->on_fail), role2text(action->fail_role));
+
+ if ((action->fail_role != RSC_ROLE_STARTED)
+ && (history->rsc->next_role < action->fail_role)) {
+ pe__set_next_role(history->rsc, action->fail_role, "failure");
+ }
+
+ if (action->fail_role == RSC_ROLE_STOPPED) {
+ ban_from_all_nodes(history->rsc);
+ }
+
+ pe_free_action(action);
+}
+
+/*!
+ * \internal
+ * \brief Block a resource with a failed action if it cannot be recovered
+ *
+ * If resource action is a failed stop and fencing is not possible, mark the
+ * resource as unmanaged and blocked, since recovery cannot be done.
+ *
+ * \param[in,out] history Parsed action history entry
+ */
+static void
+block_if_unrecoverable(struct action_history *history)
+{
+ char *last_change_s = NULL;
+
+ if (strcmp(history->task, CRMD_ACTION_STOP) != 0) {
+ return; // All actions besides stop are always recoverable
+ }
+ if (pe_can_fence(history->node->details->data_set, history->node)) {
+ return; // Failed stops are recoverable via fencing
+ }
+
+ last_change_s = last_change_str(history->xml);
+ pe_proc_err("No further recovery can be attempted for %s "
+ "because %s on %s failed (%s%s%s) at %s "
+ CRM_XS " rc=%d id=%s",
+ history->rsc->id, history->task, pe__node_name(history->node),
+ services_ocf_exitcode_str(history->exit_status),
+ (pcmk__str_empty(history->exit_reason)? "" : ": "),
+ pcmk__s(history->exit_reason, ""),
+ last_change_s, history->exit_status, history->id);
+
+ free(last_change_s);
+
+ pe__clear_resource_flags(history->rsc, pe_rsc_managed);
+ pe__set_resource_flags(history->rsc, pe_rsc_block);
+}
+
+/*!
+ * \internal
+ * \brief Update action history's execution status and why
+ *
+ * \param[in,out] history Parsed action history entry
+ * \param[out] why Where to store reason for update
+ * \param[in] value New value
+ * \param[in] reason Description of why value was changed
+ */
+static inline void
+remap_because(struct action_history *history, const char **why, int value,
+ const char *reason)
+{
+ if (history->execution_status != value) {
+ history->execution_status = value;
+ *why = reason;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Remap informational monitor results and operation status
+ *
+ * For monitor results, certain OCF codes provide extended information to
+ * the user about services that are not failed yet but are not entirely
+ * healthy either. Pacemaker must treat these as the "normal" result.
+ *
+ * For operation status, the action result can be used to determine an
+ * appropriate status for the purposes of responding to the action. The
+ * status provided by the executor is not directly usable, since the
+ * executor does not know what was expected.
+ *
+ * \param[in,out] history Parsed action history entry
+ * \param[in,out] on_fail What should be done about the result
+ * \param[in] expired Whether result is expired
+ *
+ * \note If the result is remapped and the node is not shutting down or failed,
+ * the operation will be recorded in the data set's list of failed operations
+ * to highlight it for the user.
+ *
+ * \note This may update the resource's current and next role.
+ */
+static void
+remap_operation(struct action_history *history,
+ enum action_fail_response *on_fail, bool expired)
+{
+ bool is_probe = false;
+ int orig_exit_status = history->exit_status;
+ int orig_exec_status = history->execution_status;
+ const char *why = NULL;
+ const char *task = history->task;
+
+ // Remap degraded results to their successful counterparts
+ history->exit_status = pcmk__effective_rc(history->exit_status);
+ if (history->exit_status != orig_exit_status) {
+ why = "degraded result";
+ if (!expired && (!history->node->details->shutdown
+ || history->node->details->online)) {
+ record_failed_op(history);
+ }
+ }
+
+ if (!pe_rsc_is_bundled(history->rsc)
+ && pcmk_xe_mask_probe_failure(history->xml)
+ && ((history->execution_status != PCMK_EXEC_DONE)
+ || (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
+ history->execution_status = PCMK_EXEC_DONE;
+ history->exit_status = PCMK_OCF_NOT_RUNNING;
+ why = "equivalent probe result";
+ }
+
+ /* If the executor reported an execution status of anything but done or
+ * error, consider that final. But for done or error, we know better whether
+ * it should be treated as a failure or not, because we know the expected
+ * result.
+ */
+ switch (history->execution_status) {
+ case PCMK_EXEC_DONE:
+ case PCMK_EXEC_ERROR:
+ break;
+
+ // These should be treated as node-fatal
+ case PCMK_EXEC_NO_FENCE_DEVICE:
+ case PCMK_EXEC_NO_SECRETS:
+ remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
+ "node-fatal error");
+ goto remap_done;
+
+ default:
+ goto remap_done;
+ }
+
+ is_probe = pcmk_xe_is_probe(history->xml);
+ if (is_probe) {
+ task = "probe";
+ }
+
+ if (history->expected_exit_status < 0) {
+ /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
+ * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
+ * expected exit status in the transition key, which (along with the
+ * similar case of a corrupted transition key in the CIB) will be
+ * reported to this function as -1. Pacemaker 2.0+ does not support
+ * rolling upgrades from those versions or processing of saved CIB files
+ * from those versions, so we do not need to care much about this case.
+ */
+ remap_because(history, &why, PCMK_EXEC_ERROR,
+ "obsolete history format");
+ crm_warn("Expected result not found for %s on %s "
+ "(corrupt or obsolete CIB?)",
+ history->key, pe__node_name(history->node));
+
+ } else if (history->exit_status == history->expected_exit_status) {
+ remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
+
+ } else {
+ remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
+ pe_rsc_debug(history->rsc,
+ "%s on %s: expected %d (%s), got %d (%s%s%s)",
+ history->key, pe__node_name(history->node),
+ history->expected_exit_status,
+ services_ocf_exitcode_str(history->expected_exit_status),
+ history->exit_status,
+ services_ocf_exitcode_str(history->exit_status),
+ (pcmk__str_empty(history->exit_reason)? "" : ": "),
+ pcmk__s(history->exit_reason, ""));
+ }
+
+ switch (history->exit_status) {
+ case PCMK_OCF_OK:
+ if (is_probe
+ && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
+ char *last_change_s = last_change_str(history->xml);
+
+ remap_because(history, &why, PCMK_EXEC_DONE, "probe");
+ pe_rsc_info(history->rsc, "Probe found %s active on %s at %s",
+ history->rsc->id, pe__node_name(history->node),
+ last_change_s);
+ free(last_change_s);
+ }
+ break;
+
+ case PCMK_OCF_NOT_RUNNING:
+ if (is_probe
+ || (history->expected_exit_status == history->exit_status)
+ || !pcmk_is_set(history->rsc->flags, pe_rsc_managed)) {
+
+ /* For probes, recurring monitors for the Stopped role, and
+ * unmanaged resources, "not running" is not considered a
+ * failure.
+ */
+ remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
+ history->rsc->role = RSC_ROLE_STOPPED;
+ *on_fail = action_fail_ignore;
+ pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ "not running");
+ }
+ break;
+
+ case PCMK_OCF_RUNNING_PROMOTED:
+ if (is_probe
+ && (history->exit_status != history->expected_exit_status)) {
+ char *last_change_s = last_change_str(history->xml);
+
+ remap_because(history, &why, PCMK_EXEC_DONE, "probe");
+ pe_rsc_info(history->rsc,
+ "Probe found %s active and promoted on %s at %s",
+ history->rsc->id, pe__node_name(history->node),
+ last_change_s);
+ free(last_change_s);
+ }
+ if (!expired
+ || (history->exit_status == history->expected_exit_status)) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+ }
+ break;
+
+ case PCMK_OCF_FAILED_PROMOTED:
+ if (!expired) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+ }
+ remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
+ break;
+
+ case PCMK_OCF_NOT_CONFIGURED:
+ remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
+ break;
+
+ case PCMK_OCF_UNIMPLEMENT_FEATURE:
+ {
+ guint interval_ms = 0;
+ crm_element_value_ms(history->xml, XML_LRM_ATTR_INTERVAL_MS,
+ &interval_ms);
+
+ if (interval_ms == 0) {
+ if (!expired) {
+ block_if_unrecoverable(history);
+ }
+ remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
+ "exit status");
+ } else {
+ remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
+ "exit status");
+ }
+ }
+ break;
+
+ case PCMK_OCF_NOT_INSTALLED:
+ case PCMK_OCF_INVALID_PARAM:
+ case PCMK_OCF_INSUFFICIENT_PRIV:
+ if (!expired) {
+ block_if_unrecoverable(history);
+ }
+ remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
+ break;
+
+ default:
+ if (history->execution_status == PCMK_EXEC_DONE) {
+ char *last_change_s = last_change_str(history->xml);
+
+ crm_info("Treating unknown exit status %d from %s of %s "
+ "on %s at %s as failure",
+ history->exit_status, task, history->rsc->id,
+ pe__node_name(history->node), last_change_s);
+ remap_because(history, &why, PCMK_EXEC_ERROR,
+ "unknown exit status");
+ free(last_change_s);
+ }
+ break;
+ }
+
+remap_done:
+ if (why != NULL) {
+ pe_rsc_trace(history->rsc,
+ "Remapped %s result from [%s: %s] to [%s: %s] "
+ "because of %s",
+ history->key, pcmk_exec_status_str(orig_exec_status),
+ crm_exit_str(orig_exit_status),
+ pcmk_exec_status_str(history->execution_status),
+ crm_exit_str(history->exit_status), why);
+ }
+}
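+/* One common outcome of the remapping above: a completed probe that finds
+ * the resource inactive returns PCMK_OCF_NOT_RUNNING and, whatever result
+ * was expected, is remapped to PCMK_EXEC_DONE with the role set to stopped,
+ * so a clean "not running" probe is never treated as a failure.
+ */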
+
+// Return TRUE if a start or monitor last failure should be cleared because
+// the resource's parameters have changed since it was recorded
+static bool
+should_clear_for_param_change(const xmlNode *xml_op, const char *task,
+ pe_resource_t *rsc, pe_node_t *node)
+{
+ if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
+
+ if (pe__bundle_needs_remote_name(rsc)) {
+ /* We haven't allocated resources yet, so we can't reliably
+ * substitute addr parameters for the REMOTE_CONTAINER_HACK.
+ * When that's needed, defer the check until later.
+ */
+ pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
+ rsc->cluster);
+
+ } else {
+ op_digest_cache_t *digest_data = NULL;
+
+ digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
+ rsc->cluster);
+ switch (digest_data->rc) {
+ case RSC_DIGEST_UNKNOWN:
+ crm_trace("Resource %s history entry %s on %s"
+ " has no digest to compare",
+ rsc->id, pe__xe_history_key(xml_op),
+ node->details->id);
+ break;
+ case RSC_DIGEST_MATCH:
+ break;
+ default:
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
+// Order action after fencing of remote node, given connection rsc
+static void
+order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
+ pe_working_set_t *data_set)
+{
+ pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
+
+ if (remote_node) {
+ pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
+ FALSE, data_set);
+
+ order_actions(fence, action, pe_order_implies_then);
+ }
+}
+
+static bool
+should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
+ guint interval_ms, bool is_last_failure)
+{
+ /* Clearing failures of recurring monitors has special concerns. The
+ * executor reports only changes in the monitor result, so if the
+ * monitor is still active and still getting the same failure result,
+ * that will go undetected after the failure is cleared.
+ *
+ * Also, the operation history will have the time when the recurring
+ * monitor result changed to the given code, not the time when the
+ * result last happened.
+ *
+ * @TODO We probably should clear such failures only when the failure
+ * timeout has passed since the last occurrence of the failed result.
+ * However we don't record that information. We could maybe approximate
+ * that by clearing only if there is a more recent successful monitor or
+ * stop result, but we don't even have that information at this point
+ * since we are still unpacking the resource's operation history.
+ *
+ * This is especially important for remote connection resources with a
+ * reconnect interval, so in that case, we skip clearing failures
+ * if the remote node hasn't been fenced.
+ */
+ if (rsc->remote_reconnect_ms
+ && pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
+ && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+
+ pe_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
+
+ if (remote_node && !remote_node->details->remote_was_fenced) {
+ if (is_last_failure) {
+ crm_info("Waiting to clear monitor failure for remote node %s"
+ " until fencing has occurred", rsc->id);
+ }
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/*!
+ * \internal
+ * \brief Check operation age and schedule failure clearing when appropriate
+ *
+ * This function has two distinct purposes. The first is to check whether an
+ * operation history entry is expired (i.e. the resource has a failure timeout,
+ * the entry is older than the timeout, and the resource either has no fail
+ * count or its fail count is entirely older than the timeout). The second is
+ * to schedule fail count clearing when appropriate: when the operation is
+ * expired and the resource has an expired fail count, when the operation is
+ * an expired last_failure of a remote connection resource with a reconnect
+ * interval, or when the operation is a last_failure of a start or monitor
+ * whose resource parameters have changed since the operation.
+ *
+ * \param[in,out] history Parsed action result history
+ *
+ * \return true if operation history entry is expired, otherwise false
+ */
+static bool
+check_operation_expiry(struct action_history *history)
+{
+ bool expired = false;
+ bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
+ time_t last_run = 0;
+ int unexpired_fail_count = 0;
+ const char *clear_reason = NULL;
+
+ if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
+ pe_rsc_trace(history->rsc,
+ "Resource history entry %s on %s is not expired: "
+ "Not Installed does not expire",
+ history->id, pe__node_name(history->node));
+ return false; // "Not installed" must always be cleared manually
+ }
+
+ if ((history->rsc->failure_timeout > 0)
+ && (crm_element_value_epoch(history->xml, XML_RSC_OP_LAST_CHANGE,
+ &last_run) == 0)) {
+
+ // Resource has a failure-timeout, and history entry has a timestamp
+
+ time_t now = get_effective_time(history->rsc->cluster);
+ time_t last_failure = 0;
+
+ // Is this particular operation history older than the failure timeout?
+ if ((now >= (last_run + history->rsc->failure_timeout))
+ && !should_ignore_failure_timeout(history->rsc, history->task,
+ history->interval_ms,
+ is_last_failure)) {
+ expired = true;
+ }
+
+ // Does the resource as a whole have an unexpired fail count?
+ unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
+ &last_failure, pe_fc_effective,
+ history->xml);
+
+ // Update scheduler recheck time according to *last* failure
+ crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
+ " last-failure@%lld",
+ history->id, (long long) last_run, (expired? "" : "not "),
+ (long long) now, unexpired_fail_count,
+ history->rsc->failure_timeout, (long long) last_failure);
+ last_failure += history->rsc->failure_timeout + 1;
+ if (unexpired_fail_count && (now < last_failure)) {
+ pe__update_recheck_time(last_failure, history->rsc->cluster);
+ }
+ }
+
+ if (expired) {
+ if (pe_get_failcount(history->node, history->rsc, NULL, pe_fc_default,
+ history->xml)) {
+ // There is a fail count ignoring timeout
+
+ if (unexpired_fail_count == 0) {
+ // There is no fail count considering timeout
+ clear_reason = "it expired";
+
+ } else {
+ /* This operation is old, but there is an unexpired fail count.
+ * In a properly functioning cluster, this should only be
+ * possible if this operation is not a failure (otherwise the
+ * fail count should be expired too), so this is really just a
+ * failsafe.
+ */
+ pe_rsc_trace(history->rsc,
+ "Resource history entry %s on %s is not expired: "
+ "Unexpired fail count",
+ history->id, pe__node_name(history->node));
+ expired = false;
+ }
+
+ } else if (is_last_failure
+ && (history->rsc->remote_reconnect_ms != 0)) {
+ /* Clear any expired last failure when reconnect interval is set,
+ * even if there is no fail count.
+ */
+ clear_reason = "reconnect interval is set";
+ }
+ }
+
+ if (!expired && is_last_failure
+ && should_clear_for_param_change(history->xml, history->task,
+ history->rsc, history->node)) {
+ clear_reason = "resource parameters have changed";
+ }
+
+ if (clear_reason != NULL) {
+ // Schedule clearing of the fail count
+ pe_action_t *clear_op = pe__clear_failcount(history->rsc, history->node,
+ clear_reason,
+ history->rsc->cluster);
+
+ if (pcmk_is_set(history->rsc->cluster->flags, pe_flag_stonith_enabled)
+ && (history->rsc->remote_reconnect_ms != 0)) {
+ /* If we're clearing a remote connection due to a reconnect
+ * interval, we want to wait until any scheduled fencing
+ * completes.
+ *
+ * We could limit this to remote_node->details->unclean, but at
+ * this point, that's always true (it won't be reliable until
+ * after unpack_node_history() is done).
+ */
+ crm_info("Clearing %s failure will wait until any scheduled "
+ "fencing of %s completes",
+ history->task, history->rsc->id);
+ order_after_remote_fencing(clear_op, history->rsc,
+ history->rsc->cluster);
+ }
+ }
+
+ if (expired && (history->interval_ms == 0)
+ && pcmk__str_eq(history->task, CRMD_ACTION_STATUS, pcmk__str_none)) {
+ switch (history->exit_status) {
+ case PCMK_OCF_OK:
+ case PCMK_OCF_NOT_RUNNING:
+ case PCMK_OCF_RUNNING_PROMOTED:
+ case PCMK_OCF_DEGRADED:
+ case PCMK_OCF_DEGRADED_PROMOTED:
+ // Don't expire probes that return these values
+ pe_rsc_trace(history->rsc,
+ "Resource history entry %s on %s is not expired: "
+ "Probe result",
+ history->id, pe__node_name(history->node));
+ expired = false;
+ break;
+ }
+ }
+
+ return expired;
+}
+
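+/*!
+ * \internal
+ * \brief Parse the expected exit status from a history entry's transition key
+ *
+ * \param[in] xml_op Operation history entry XML
+ *
+ * \return Expected exit status recorded in the entry's transition key, or -1
+ * if the entry has no transition key
+ */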
+int
+pe__target_rc_from_xml(const xmlNode *xml_op)
+{
+ int target_rc = 0;
+ const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
+
+ if (key == NULL) {
+ return -1;
+ }
+ decode_transition_key(key, NULL, NULL, NULL, &target_rc);
+ return target_rc;
+}
+
+/*!
+ * \internal
+ * \brief Get the failure handling for an action
+ *
+ * \param[in,out] history Parsed action history entry
+ *
+ * \return Failure handling appropriate to action
+ */
+static enum action_fail_response
+get_action_on_fail(struct action_history *history)
+{
+ enum action_fail_response result = action_fail_recover;
+ pe_action_t *action = custom_action(history->rsc, strdup(history->key),
+ history->task, NULL, TRUE, FALSE,
+ history->rsc->cluster);
+
+ result = action->on_fail;
+ pe_free_action(action);
+ return result;
+}
+
+/*!
+ * \internal
+ * \brief Update a resource's state for an action result
+ *
+ * \param[in,out] history Parsed action history entry
+ * \param[in] exit_status Exit status to base new state on
+ * \param[in] last_failure Resource's last_failure entry, if known
+ * \param[in,out] on_fail Resource's current failure handling
+ */
+static void
+update_resource_state(struct action_history *history, int exit_status,
+ const xmlNode *last_failure,
+ enum action_fail_response *on_fail)
+{
+ bool clear_past_failure = false;
+
+ if ((exit_status == PCMK_OCF_NOT_INSTALLED)
+ || (!pe_rsc_is_bundled(history->rsc)
+ && pcmk_xe_mask_probe_failure(history->xml))) {
+ history->rsc->role = RSC_ROLE_STOPPED;
+
+ } else if (exit_status == PCMK_OCF_NOT_RUNNING) {
+ clear_past_failure = true;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_STATUS,
+ pcmk__str_none)) {
+ if ((last_failure != NULL)
+ && pcmk__str_eq(history->key, pe__xe_history_key(last_failure),
+ pcmk__str_none)) {
+ clear_past_failure = true;
+ }
+ if (history->rsc->role < RSC_ROLE_STARTED) {
+ set_active(history->rsc);
+ }
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_START, pcmk__str_none)) {
+ history->rsc->role = RSC_ROLE_STARTED;
+ clear_past_failure = true;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_STOP, pcmk__str_none)) {
+ history->rsc->role = RSC_ROLE_STOPPED;
+ clear_past_failure = true;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_PROMOTE,
+ pcmk__str_none)) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+ clear_past_failure = true;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_DEMOTE,
+ pcmk__str_none)) {
+ if (*on_fail == action_fail_demote) {
+ // Demote clears an error only if on-fail=demote
+ clear_past_failure = true;
+ }
+ history->rsc->role = RSC_ROLE_UNPROMOTED;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATED,
+ pcmk__str_none)) {
+ history->rsc->role = RSC_ROLE_STARTED;
+ clear_past_failure = true;
+
+ } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATE,
+ pcmk__str_none)) {
+ unpack_migrate_to_success(history);
+
+ } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ pe_rsc_trace(history->rsc, "%s active on %s",
+ history->rsc->id, pe__node_name(history->node));
+ set_active(history->rsc);
+ }
+
+ if (!clear_past_failure) {
+ return;
+ }
+
+ switch (*on_fail) {
+ case action_fail_stop:
+ case action_fail_fence:
+ case action_fail_migrate:
+ case action_fail_standby:
+ pe_rsc_trace(history->rsc,
+ "%s (%s) is not cleared by a completed %s",
+ history->rsc->id, fail2text(*on_fail), history->task);
+ break;
+
+ case action_fail_block:
+ case action_fail_ignore:
+ case action_fail_demote:
+ case action_fail_recover:
+ case action_fail_restart_container:
+ *on_fail = action_fail_ignore;
+ pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ "clear past failures");
+ break;
+
+ case action_fail_reset_remote:
+ if (history->rsc->remote_reconnect_ms == 0) {
+ /* With no reconnect interval, the connection is allowed to
+ * start again after the remote node is fenced and
+ * completely stopped. (With a reconnect interval, we wait
+ * for the failure to be cleared entirely before attempting
+ * to reconnect.)
+ */
+ *on_fail = action_fail_ignore;
+ pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ "clear past failures and reset remote");
+ }
+ break;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given history entry matters for resource state
+ *
+ * \param[in] history Parsed action history entry
+ *
+ * \return true if action can affect resource state, otherwise false
+ */
+static inline bool
+can_affect_state(struct action_history *history)
+{
+#if 0
+ /* @COMPAT It might be better to parse only actions we know we're interested
+ * in, rather than exclude a couple we don't. However that would be a
+ * behavioral change that should be done at a major or minor series release.
+ * Currently, unknown operations can affect whether a resource is considered
+ * active and/or failed.
+ */
+ return pcmk__str_any_of(history->task, CRMD_ACTION_STATUS,
+ CRMD_ACTION_START, CRMD_ACTION_STOP,
+ CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
+ CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
+ "asyncmon", NULL);
+#else
+ return !pcmk__str_any_of(history->task, CRMD_ACTION_NOTIFY,
+ CRMD_ACTION_METADATA, NULL);
+#endif
+}
+
+/*!
+ * \internal
+ * \brief Unpack execution/exit status and exit reason from a history entry
+ *
+ * \param[in,out] history Action history entry to unpack
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+unpack_action_result(struct action_history *history)
+{
+ if ((crm_element_value_int(history->xml, XML_LRM_ATTR_OPSTATUS,
+ &(history->execution_status)) < 0)
+ || (history->execution_status < PCMK_EXEC_PENDING)
+ || (history->execution_status > PCMK_EXEC_MAX)
+ || (history->execution_status == PCMK_EXEC_CANCELLED)) {
+ crm_err("Ignoring resource history entry %s for %s on %s "
+ "with invalid " XML_LRM_ATTR_OPSTATUS " '%s'",
+ history->id, history->rsc->id, pe__node_name(history->node),
+ pcmk__s(crm_element_value(history->xml, XML_LRM_ATTR_OPSTATUS),
+ ""));
+ return pcmk_rc_unpack_error;
+ }
+ if ((crm_element_value_int(history->xml, XML_LRM_ATTR_RC,
+ &(history->exit_status)) < 0)
+ || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
+#if 0
+ /* @COMPAT We should ignore malformed entries, but since that would
+ * change behavior, it should be done at a major or minor series
+ * release.
+ */
+ crm_err("Ignoring resource history entry %s for %s on %s "
+ "with invalid " XML_LRM_ATTR_RC " '%s'",
+ history->id, history->rsc->id, pe__node_name(history->node),
+ pcmk__s(crm_element_value(history->xml, XML_LRM_ATTR_RC),
+ ""));
+ return pcmk_rc_unpack_error;
+#else
+ history->exit_status = CRM_EX_ERROR;
+#endif
+ }
+ history->exit_reason = crm_element_value(history->xml,
+ XML_LRM_ATTR_EXIT_REASON);
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Process an action history entry whose result expired
+ *
+ * \param[in,out] history Parsed action history entry
+ * \param[in] orig_exit_status Action exit status before remapping
+ *
+ * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
+ * entry needs no further processing)
+ */
+static int
+process_expired_result(struct action_history *history, int orig_exit_status)
+{
+ if (!pe_rsc_is_bundled(history->rsc)
+ && pcmk_xe_mask_probe_failure(history->xml)
+ && (orig_exit_status != history->expected_exit_status)) {
+
+ if (history->rsc->role <= RSC_ROLE_STOPPED) {
+ history->rsc->role = RSC_ROLE_UNKNOWN;
+ }
+ crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
+ "Masked failure expired",
+ history->id, history->rsc->id,
+ pe__node_name(history->node));
+ return pcmk_rc_ok;
+ }
+
+ if (history->exit_status == history->expected_exit_status) {
+ return pcmk_rc_undetermined; // Only failures expire
+ }
+
+ if (history->interval_ms == 0) {
+ crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
+ "Expired failure",
+ history->id, history->task, history->rsc->id,
+ pe__node_name(history->node));
+ return pcmk_rc_ok;
+ }
+
+ if (history->node->details->online && !history->node->details->unclean) {
+ /* Reschedule the recurring action. schedule_cancel() won't work at
+ * this stage, so as a hacky workaround, forcibly change the restart
+ * digest so pcmk__check_action_config() does what we want later.
+ *
+ * @TODO We should skip this if there is a newer successful monitor.
+ * Also, this causes rescheduling only if the history entry
+ * has an op-digest (which the expire-non-blocked-failure
+ * scheduler regression test doesn't, but that may not be a
+ * realistic scenario in production).
+ */
+ crm_notice("Rescheduling %s-interval %s of %s on %s "
+ "after failure expired",
+ pcmk__readable_interval(history->interval_ms), history->task,
+ history->rsc->id, pe__node_name(history->node));
+ crm_xml_add(history->xml, XML_LRM_ATTR_RESTART_DIGEST,
+ "calculated-failure-timeout");
+ return pcmk_rc_ok;
+ }
+
+ return pcmk_rc_undetermined;
+}
+
+/*!
+ * \internal
+ * \brief Process a masked probe failure
+ *
+ * \param[in,out] history Parsed action history entry
+ * \param[in] orig_exit_status Action exit status before remapping
+ * \param[in] last_failure Resource's last_failure entry, if known
+ * \param[in,out] on_fail Resource's current failure handling
+ */
+static void
+mask_probe_failure(struct action_history *history, int orig_exit_status,
+ const xmlNode *last_failure,
+ enum action_fail_response *on_fail)
+{
+ pe_resource_t *ban_rsc = history->rsc;
+
+ if (!pcmk_is_set(history->rsc->flags, pe_rsc_unique)) {
+ ban_rsc = uber_parent(history->rsc);
+ }
+
+ crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
+ services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
+ pe__node_name(history->node));
+ update_resource_state(history, history->expected_exit_status, last_failure,
+ on_fail);
+ crm_xml_add(history->xml, XML_ATTR_UNAME, history->node->details->uname);
+
+ record_failed_op(history);
+ resource_location(ban_rsc, history->node, -INFINITY, "masked-probe-failure",
+ history->rsc->cluster);
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given failure is for a given pending action
+ *
+ * \param[in] history Parsed history entry for pending action
+ * \param[in] last_failure Resource's last_failure entry, if known
+ *
+ * \return true if \p last_failure is failure of pending action in \p history,
+ * otherwise false
+ * \note Both \p history and \p last_failure must come from the same
+ * lrm_resource block, as node and resource are assumed to be the same.
+ */
+static bool
+failure_is_newer(const struct action_history *history,
+ const xmlNode *last_failure)
+{
+ guint failure_interval_ms = 0U;
+ long long failure_change = 0LL;
+ long long this_change = 0LL;
+
+ if (last_failure == NULL) {
+ return false; // Resource has no last_failure entry
+ }
+
+ if (!pcmk__str_eq(history->task,
+ crm_element_value(last_failure, XML_LRM_ATTR_TASK),
+ pcmk__str_none)) {
+ return false; // last_failure is for different action
+ }
+
+ if ((crm_element_value_ms(last_failure, XML_LRM_ATTR_INTERVAL_MS,
+ &failure_interval_ms) != pcmk_ok)
+ || (history->interval_ms != failure_interval_ms)) {
+ return false; // last_failure is for action with different interval
+ }
+
+ if ((pcmk__scan_ll(crm_element_value(history->xml, XML_RSC_OP_LAST_CHANGE),
+ &this_change, 0LL) != pcmk_rc_ok)
+ || (pcmk__scan_ll(crm_element_value(last_failure,
+ XML_RSC_OP_LAST_CHANGE),
+ &failure_change, 0LL) != pcmk_rc_ok)
+ || (failure_change < this_change)) {
+ return false; // Failure is not known to be newer
+ }
+
+ return true;
+}
+
+/*!
+ * \internal
+ * \brief Update a resource's role etc. for a pending action
+ *
+ * \param[in,out] history Parsed history entry for pending action
+ * \param[in] last_failure Resource's last_failure entry, if known
+ */
+static void
+process_pending_action(struct action_history *history,
+ const xmlNode *last_failure)
+{
+ /* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
+ * and there might be a RSC_monitor_INTERVAL entry with the last successful
+ * or pending result.
+ *
+ * If last_failure contains the failure of the pending recurring monitor
+ * we're processing here, and is newer, the action is no longer pending.
+ * (Pending results have call ID -1, which sorts last, so the last failure
+ * if any should be known.)
+ */
+ if (failure_is_newer(history, last_failure)) {
+ return;
+ }
+
+ if (strcmp(history->task, CRMD_ACTION_START) == 0) {
+ pe__set_resource_flags(history->rsc, pe_rsc_start_pending);
+ set_active(history->rsc);
+
+ } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
+ history->rsc->role = RSC_ROLE_PROMOTED;
+
+ } else if ((strcmp(history->task, CRMD_ACTION_MIGRATE) == 0)
+ && history->node->details->unclean) {
+ /* A migrate_to action is pending on an unclean source, so force a stop
+ * on the target.
+ */
+ const char *migrate_target = NULL;
+ pe_node_t *target = NULL;
+
+ migrate_target = crm_element_value(history->xml,
+ XML_LRM_ATTR_MIGRATE_TARGET);
+ target = pe_find_node(history->rsc->cluster->nodes, migrate_target);
+ if (target != NULL) {
+ stop_action(history->rsc, target, FALSE);
+ }
+ }
+
+ if (history->rsc->pending_task != NULL) {
+ /* There should never be multiple pending actions, but as a failsafe,
+ * just remember the first one processed for display purposes.
+ */
+ return;
+ }
+
+ if (pcmk_is_probe(history->task, history->interval_ms)) {
+ /* Pending probes are currently never displayed, even if pending
+ * operations are requested. If we ever want to change that,
+ * enable the below and the corresponding part of
+ * native.c:native_pending_task().
+ */
+#if 0
+ history->rsc->pending_task = strdup("probe");
+ history->rsc->pending_node = history->node;
+#endif
+ } else {
+ history->rsc->pending_task = strdup(history->task);
+ history->rsc->pending_node = history->node;
+ }
+}
+
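+/*!
+ * \internal
+ * \brief Unpack one resource operation history entry, updating resource state
+ *
+ * \param[in,out] rsc Resource that the history entry is for
+ * \param[in,out] node Node that the history entry is for
+ * \param[in,out] xml_op Operation history entry XML to unpack
+ * \param[in,out] last_failure Where the resource's last_failure entry, if
+ * known, is stored (may be updated here)
+ * \param[in,out] on_fail Resource's current failure handling
+ */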
+static void
+unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+ xmlNode **last_failure, enum action_fail_response *on_fail)
+{
+ int old_rc = 0;
+ bool expired = false;
+ pe_resource_t *parent = rsc;
+ enum action_fail_response failure_strategy = action_fail_recover;
+
+ struct action_history history = {
+ .rsc = rsc,
+ .node = node,
+ .xml = xml_op,
+ .execution_status = PCMK_EXEC_UNKNOWN,
+ };
+
+ CRM_CHECK(rsc && node && xml_op, return);
+
+ history.id = ID(xml_op);
+ if (history.id == NULL) {
+ crm_err("Ignoring resource history entry for %s on %s without ID",
+ rsc->id, pe__node_name(node));
+ return;
+ }
+
+ // Task and interval
+ history.task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
+ if (history.task == NULL) {
+ crm_err("Ignoring resource history entry %s for %s on %s without "
+ XML_LRM_ATTR_TASK, history.id, rsc->id, pe__node_name(node));
+ return;
+ }
+ crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS,
+ &(history.interval_ms));
+ if (!can_affect_state(&history)) {
+ pe_rsc_trace(rsc,
+ "Ignoring resource history entry %s for %s on %s "
+ "with irrelevant action '%s'",
+ history.id, rsc->id, pe__node_name(node), history.task);
+ return;
+ }
+
+ if (unpack_action_result(&history) != pcmk_rc_ok) {
+ return; // Error already logged
+ }
+
+ history.expected_exit_status = pe__target_rc_from_xml(xml_op);
+ history.key = pe__xe_history_key(xml_op);
+ crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &(history.call_id));
+
+ pe_rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
+ history.id, history.task, history.call_id, pe__node_name(node),
+ pcmk_exec_status_str(history.execution_status),
+ crm_exit_str(history.exit_status));
+
+ if (node->details->unclean) {
+ pe_rsc_trace(rsc,
+ "%s is running on %s, which is unclean (further action "
+ "depends on value of stop's on-fail attribute)",
+ rsc->id, pe__node_name(node));
+ }
+
+ expired = check_operation_expiry(&history);
+ old_rc = history.exit_status;
+
+ remap_operation(&history, on_fail, expired);
+
+ if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
+ goto done;
+ }
+
+ if (!pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
+ mask_probe_failure(&history, old_rc, *last_failure, on_fail);
+ goto done;
+ }
+
+ if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ parent = uber_parent(rsc);
+ }
+
+ switch (history.execution_status) {
+ case PCMK_EXEC_PENDING:
+ process_pending_action(&history, *last_failure);
+ goto done;
+
+ case PCMK_EXEC_DONE:
+ update_resource_state(&history, history.exit_status, *last_failure,
+ on_fail);
+ goto done;
+
+ case PCMK_EXEC_NOT_INSTALLED:
+ failure_strategy = get_action_on_fail(&history);
+ if (failure_strategy == action_fail_ignore) {
+ crm_warn("Cannot ignore failed %s of %s on %s: "
+ "Resource agent doesn't exist "
+ CRM_XS " status=%d rc=%d id=%s",
+ history.task, rsc->id, pe__node_name(node),
+ history.execution_status, history.exit_status,
+ history.id);
+ /* This also gets the resource marked as failed (pe_rsc_failed)
+ * later, so that it is displayed as "FAILED" */
+ *on_fail = action_fail_migrate;
+ }
+ resource_location(parent, node, -INFINITY, "hard-error",
+ rsc->cluster);
+ unpack_rsc_op_failure(&history, last_failure, on_fail);
+ goto done;
+
+ case PCMK_EXEC_NOT_CONNECTED:
+ if (pe__is_guest_or_remote_node(node)
+ && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
+ /* We should never get into a situation where a managed remote
+ * connection resource is considered OK but a resource action
+ * behind the connection gets a "not connected" status. But as a
+ * fail-safe in case a bug or unusual circumstances do lead to
+ * that, ensure the remote connection is considered failed.
+ */
+ pe__set_resource_flags(node->details->remote_rsc,
+ pe_rsc_failed|pe_rsc_stop);
+ }
+ break; // Not done, do error handling
+
+ case PCMK_EXEC_ERROR:
+ case PCMK_EXEC_ERROR_HARD:
+ case PCMK_EXEC_ERROR_FATAL:
+ case PCMK_EXEC_TIMEOUT:
+ case PCMK_EXEC_NOT_SUPPORTED:
+ case PCMK_EXEC_INVALID:
+ break; // Not done, do error handling
+
+ default: // No other value should be possible at this point
+ break;
+ }
+
+ failure_strategy = get_action_on_fail(&history);
+ if ((failure_strategy == action_fail_ignore)
+ || (failure_strategy == action_fail_restart_container
+ && (strcmp(history.task, CRMD_ACTION_STOP) == 0))) {
+
+ char *last_change_s = last_change_str(xml_op);
+
+ crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
+ CRM_XS " %s",
+ history.task, services_ocf_exitcode_str(history.exit_status),
+ (pcmk__str_empty(history.exit_reason)? "" : ": "),
+ pcmk__s(history.exit_reason, ""), rsc->id, pe__node_name(node),
+ last_change_s, history.id);
+ free(last_change_s);
+
+ update_resource_state(&history, history.expected_exit_status,
+ *last_failure, on_fail);
+ crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
+ pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
+
+ record_failed_op(&history);
+
+ if ((failure_strategy == action_fail_restart_container)
+ && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
+ *on_fail = failure_strategy;
+ }
+
+ } else {
+ unpack_rsc_op_failure(&history, last_failure, on_fail);
+
+ if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
+ uint8_t log_level = LOG_ERR;
+
+ if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
+ log_level = LOG_NOTICE;
+ }
+ do_crm_log(log_level,
+ "Preventing %s from restarting on %s because "
+ "of hard failure (%s%s%s) " CRM_XS " %s",
+ parent->id, pe__node_name(node),
+ services_ocf_exitcode_str(history.exit_status),
+ (pcmk__str_empty(history.exit_reason)? "" : ": "),
+ pcmk__s(history.exit_reason, ""), history.id);
+ resource_location(parent, node, -INFINITY, "hard-error",
+ rsc->cluster);
+
+ } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
+ crm_err("Preventing %s from restarting anywhere because "
+ "of fatal failure (%s%s%s) " CRM_XS " %s",
+ parent->id, services_ocf_exitcode_str(history.exit_status),
+ (pcmk__str_empty(history.exit_reason)? "" : ": "),
+ pcmk__s(history.exit_reason, ""), history.id);
+ resource_location(parent, NULL, -INFINITY, "fatal-error",
+ rsc->cluster);
+ }
+ }
+
+done:
+ pe_rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
+ rsc->id, pe__node_name(node), history.id,
+ role2text(rsc->role), role2text(rsc->next_role));
+}
+
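+/*!
+ * \internal
+ * \brief Set a node's built-in attributes and unpack its configured ones
+ *
+ * \param[in] xml_obj Node's XML from the CIB configuration
+ * \param[in,out] node Node to add attributes to
+ * \param[in] overwrite Whether configured values may replace existing ones
+ * \param[in,out] data_set Cluster working set
+ */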
+static void
+add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
+ pe_working_set_t *data_set)
+{
+ const char *cluster_name = NULL;
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = NULL,
+ .role = RSC_ROLE_UNKNOWN,
+ .now = data_set->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ g_hash_table_insert(node->details->attrs,
+ strdup(CRM_ATTR_UNAME), strdup(node->details->uname));
+
+ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
+ strdup(node->details->id));
+ if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
+ data_set->dc_node = node;
+ node->details->is_dc = TRUE;
+ g_hash_table_insert(node->details->attrs,
+ strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
+ } else {
+ g_hash_table_insert(node->details->attrs,
+ strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
+ }
+
+ cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
+ if (cluster_name) {
+ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
+ strdup(cluster_name));
+ }
+
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
+ node->details->attrs, NULL, overwrite, data_set);
+
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
+ node->details->utilization, NULL,
+ FALSE, data_set);
+
+ if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
+ const char *site_name = pe_node_attribute_raw(node, "site-name");
+
+ if (site_name) {
+ g_hash_table_insert(node->details->attrs,
+ strdup(CRM_ATTR_SITE_NAME),
+ strdup(site_name));
+
+ } else if (cluster_name) {
+ /* Default to cluster-name if unset */
+ g_hash_table_insert(node->details->attrs,
+ strdup(CRM_ATTR_SITE_NAME),
+ strdup(cluster_name));
+ }
+ }
+}
+
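+/*!
+ * \internal
+ * \brief Extract a resource's operation history entries from its state XML
+ *
+ * \param[in] node Name of node that the history entries are for
+ * \param[in] rsc ID of resource that the history entries are for
+ * \param[in,out] rsc_entry Resource's lrm_resource state XML
+ * \param[in] active_filter If TRUE, return only the active operations
+ *
+ * \return List of history entry XML, sorted by call ID
+ */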
+static GList *
+extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
+{
+ int counter = -1;
+ int stop_index = -1;
+ int start_index = -1;
+
+ xmlNode *rsc_op = NULL;
+
+ GList *gIter = NULL;
+ GList *op_list = NULL;
+ GList *sorted_op_list = NULL;
+
+ /* extract operations */
+ op_list = NULL;
+ sorted_op_list = NULL;
+
+ for (rsc_op = pcmk__xe_first_child(rsc_entry);
+ rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
+
+ if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
+ pcmk__str_none)) {
+ crm_xml_add(rsc_op, "resource", rsc);
+ crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
+ op_list = g_list_prepend(op_list, rsc_op);
+ }
+ }
+
+ if (op_list == NULL) {
+ /* if there are no operations, there is nothing to do */
+ return NULL;
+ }
+
+ sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
+
+ /* create active recurring operations as optional */
+ if (active_filter == FALSE) {
+ return sorted_op_list;
+ }
+
+ op_list = NULL;
+
+ calculate_active_ops(sorted_op_list, &start_index, &stop_index);
+
+ for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
+ xmlNode *rsc_op = (xmlNode *) gIter->data;
+
+ counter++;
+
+ if (start_index < stop_index) {
+ crm_trace("Skipping %s: not active", ID(rsc_entry));
+ break;
+
+ } else if (counter < start_index) {
+ crm_trace("Skipping %s: old", ID(rsc_op));
+ continue;
+ }
+ op_list = g_list_append(op_list, rsc_op);
+ }
+
+ g_list_free(sorted_op_list);
+ return op_list;
+}
+
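+/*!
+ * \brief Find operation history entries in a working set's status section
+ *
+ * \param[in] rsc If not NULL, consider only entries for this resource ID
+ * \param[in] node If not NULL, consider only entries for this node name
+ * \param[in] active_filter If TRUE, return only active operations
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return List of matching history entry XML
+ * \note As a side effect, this determines the online status of each node
+ * whose entries are examined.
+ */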
+GList *
+find_operations(const char *rsc, const char *node, gboolean active_filter,
+ pe_working_set_t * data_set)
+{
+ GList *output = NULL;
+ GList *intermediate = NULL;
+
+ xmlNode *tmp = NULL;
+ xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
+
+ pe_node_t *this_node = NULL;
+
+ xmlNode *node_state = NULL;
+
+ for (node_state = pcmk__xe_first_child(status); node_state != NULL;
+ node_state = pcmk__xe_next(node_state)) {
+
+ if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
+ const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);
+
+ if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
+ continue;
+ }
+
+ this_node = pe_find_node(data_set->nodes, uname);
+ if(this_node == NULL) {
+ CRM_LOG_ASSERT(this_node != NULL);
+ continue;
+
+ } else if (pe__is_guest_or_remote_node(this_node)) {
+ determine_remote_online_status(data_set, this_node);
+
+ } else {
+ determine_online_status(node_state, this_node, data_set);
+ }
+
+ if (this_node->details->online
+ || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ /* Offline nodes run no resources, unless stonith is enabled, in
+ * which case we need to make sure resource start events happen
+ * after the fencing
+ */
+ xmlNode *lrm_rsc = NULL;
+
+ tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
+ tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
+
+ for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL;
+ lrm_rsc = pcmk__xe_next(lrm_rsc)) {
+
+ if (pcmk__str_eq((const char *)lrm_rsc->name,
+ XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
+
+ const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);
+
+ if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
+ continue;
+ }
+
+ intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
+ output = g_list_concat(output, intermediate);
+ }
+ }
+ }
+ }
+ }
+
+ return output;
+}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
new file mode 100644
index 0000000..ef0a092
--- /dev/null
+++ b/lib/pengine/utils.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+#include <stdbool.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/pengine/rules.h>
+#include <crm/pengine/internal.h>
+
+#include "pe_status_private.h"
+
+extern bool pcmk__is_daemon;
+
+gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
+
+/*!
+ * \internal
+ * \brief Check whether we can fence a particular node
+ *
+ * \param[in] data_set Working set for cluster
+ * \param[in] node Name of node to check
+ *
+ * \return true if node can be fenced, false otherwise
+ */
+bool
+pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
+{
+ if (pe__is_guest_node(node)) {
+ /* Guest nodes are fenced by stopping their container resource. We can
+ * do that if the container's host is either online or fenceable.
+ */
+ pe_resource_t *rsc = node->details->remote_rsc->container;
+
+ for (GList *n = rsc->running_on; n != NULL; n = n->next) {
+ pe_node_t *container_node = n->data;
+
+ if (!container_node->details->online
+ && !pe_can_fence(data_set, container_node)) {
+ return false;
+ }
+ }
+ return true;
+
+ } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ return false; /* Turned off */
+
+ } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ return false; /* No devices */
+
+ } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ return true;
+
+ } else if (data_set->no_quorum_policy == no_quorum_ignore) {
+ return true;
+
+ } else if(node == NULL) {
+ return false;
+
+ } else if(node->details->online) {
+ crm_notice("We can fence %s without quorum because they're in our membership",
+ pe__node_name(node));
+ return true;
+ }
+
+ crm_trace("Cannot fence %s", pe__node_name(node));
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Copy a node object
+ *
+ * \param[in] this_node Node object to copy
+ *
+ * \return Newly allocated shallow copy of this_node
+ * \note This function asserts on errors and is guaranteed to return non-NULL.
+ */
+pe_node_t *
+pe__copy_node(const pe_node_t *this_node)
+{
+ pe_node_t *new_node = NULL;
+
+ CRM_ASSERT(this_node != NULL);
+
+ new_node = calloc(1, sizeof(pe_node_t));
+ CRM_ASSERT(new_node != NULL);
+
+ new_node->rsc_discover_mode = this_node->rsc_discover_mode;
+ new_node->weight = this_node->weight;
+ new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused
+ new_node->details = this_node->details;
+
+ return new_node;
+}
+
+/* Any node in the hash table or the list but not in both gets a score of -INFINITY */
+void
+node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores)
+{
+ GHashTable *result = hash;
+ pe_node_t *other_node = NULL;
+ GList *gIter = list;
+
+ GHashTableIter iter;
+ pe_node_t *node = NULL;
+
+ g_hash_table_iter_init(&iter, hash);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
+
+ other_node = pe_find_node_id(list, node->details->id);
+ if (other_node == NULL) {
+ node->weight = -INFINITY;
+ crm_trace("Banning dependent from %s (no primary instance)",
+ pe__node_name(node));
+ } else if (merge_scores) {
+ node->weight = pcmk__add_scores(node->weight, other_node->weight);
+ crm_trace("Added primary's score %s to dependent's score for %s "
+ "(now %s)", pcmk_readable_score(other_node->weight),
+ pe__node_name(node), pcmk_readable_score(node->weight));
+ }
+ }
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node = (pe_node_t *) gIter->data;
+
+ other_node = pe_hash_table_lookup(result, node->details->id);
+
+ if (other_node == NULL) {
+ pe_node_t *new_node = pe__copy_node(node);
+
+ new_node->weight = -INFINITY;
+ g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Create a node hash table from a node list
+ *
+ * \param[in] list Node list
+ *
+ * \return Hash table equivalent of node list
+ */
+GHashTable *
+pe__node_list2table(const GList *list)
+{
+ GHashTable *result = NULL;
+
+ result = pcmk__strkey_table(NULL, free);
+ for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *new_node = pe__copy_node((const pe_node_t *) gIter->data);
+
+ g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
+ }
+ return result;
+}
+
+/*!
+ * \internal
+ * \brief Compare two nodes by name, with numeric portions sorted numerically
+ *
+ * Sort two node names case-insensitively like strcasecmp(), but with any
+ * numeric portions of the name sorted numerically. For example, "node10" will
+ * sort higher than "node9" but lower than "remotenode9".
+ *
+ * \param[in] a First node to compare (can be \c NULL)
+ * \param[in] b Second node to compare (can be \c NULL)
+ *
+ * \retval -1 \c a comes before \c b (or \c a is \c NULL and \c b is not)
+ * \retval 0 \c a and \c b are equal (or both are \c NULL)
+ * \retval 1 \c a comes after \c b (or \c b is \c NULL and \c a is not)
+ */
+gint
+pe__cmp_node_name(gconstpointer a, gconstpointer b)
+{
+ const pe_node_t *node1 = (const pe_node_t *) a;
+ const pe_node_t *node2 = (const pe_node_t *) b;
+
+ if ((node1 == NULL) && (node2 == NULL)) {
+ return 0;
+ }
+
+ if (node1 == NULL) {
+ return -1;
+ }
+
+ if (node2 == NULL) {
+ return 1;
+ }
+
+ return pcmk__numeric_strcasecmp(node1->details->uname,
+ node2->details->uname);
+}
+
+/*!
+ * \internal
+ * \brief Output node weights
+ *
+ * \param[in] rsc If not NULL, include this resource's ID in the output
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose weights should be output
+ * \param[in,out] data_set Cluster working set
+ */
+static void
+pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
+ GHashTable *nodes, pe_working_set_t *data_set)
+{
+ pcmk__output_t *out = data_set->priv;
+
+ // Sort the nodes so the output is consistent for regression tests
+ GList *list = g_list_sort(g_hash_table_get_values(nodes),
+ pe__cmp_node_name);
+
+ for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
+ const pe_node_t *node = (const pe_node_t *) gIter->data;
+
+ out->message(out, "node-weight", rsc, comment, node->details->uname,
+ pcmk_readable_score(node->weight));
+ }
+ g_list_free(list);
+}
+
+/*!
+ * \internal
+ * \brief Log node weights at trace level
+ *
+ * \param[in] file Caller's filename
+ * \param[in] function Caller's function name
+ * \param[in] line Caller's line number
+ * \param[in] rsc If not NULL, include this resource's ID in logs
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose scores should be logged
+ */
+static void
+pe__log_node_weights(const char *file, const char *function, int line,
+ const pe_resource_t *rsc, const char *comment,
+ GHashTable *nodes)
+{
+ GHashTableIter iter;
+ pe_node_t *node = NULL;
+
+ // Don't waste time if we're not tracing at this point
+ pcmk__if_tracing({}, return);
+
+ g_hash_table_iter_init(&iter, nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
+ if (rsc) {
+ qb_log_from_external_source(function, file,
+ "%s: %s allocation score on %s: %s",
+ LOG_TRACE, line, 0,
+ comment, rsc->id,
+ pe__node_name(node),
+ pcmk_readable_score(node->weight));
+ } else {
+ qb_log_from_external_source(function, file, "%s: %s = %s",
+ LOG_TRACE, line, 0,
+ comment, pe__node_name(node),
+ pcmk_readable_score(node->weight));
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Log or output node weights
+ *
+ * \param[in] file Caller's filename
+ * \param[in] function Caller's function name
+ * \param[in] line Caller's line number
+ * \param[in] to_log Log if true, otherwise output
+ * \param[in] rsc If not NULL, use this resource's ID in logs,
+ * and show scores recursively for any children
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose scores should be shown
+ * \param[in,out] data_set Cluster working set
+ */
+void
+pe__show_node_weights_as(const char *file, const char *function, int line,
+ bool to_log, const pe_resource_t *rsc,
+ const char *comment, GHashTable *nodes,
+ pe_working_set_t *data_set)
+{
+ if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ // Don't show allocation scores for orphans
+ return;
+ }
+ if (nodes == NULL) {
+ // Nothing to show
+ return;
+ }
+
+ if (to_log) {
+ pe__log_node_weights(file, function, line, rsc, comment, nodes);
+ } else {
+ pe__output_node_weights(rsc, comment, nodes, data_set);
+ }
+
+ // If this resource has children, repeat recursively for each
+ if (rsc && rsc->children) {
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child = (pe_resource_t *) gIter->data;
+
+ pe__show_node_weights_as(file, function, line, to_log, child,
+ comment, child->allowed_nodes, data_set);
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Compare two resources by priority
+ *
+ * \param[in] a First resource to compare (can be \c NULL)
+ * \param[in] b Second resource to compare (can be \c NULL)
+ *
+ * \retval -1 \c a->priority > \c b->priority (or \c b is \c NULL and \c a is
+ * not)
+ * \retval 0 \c a->priority == \c b->priority (or both \c a and \c b are
+ * \c NULL)
+ * \retval 1 \c a->priority < \c b->priority (or \c a is \c NULL and \c b is
+ * not)
+ */
+gint
+pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
+{
+ const pe_resource_t *resource1 = (const pe_resource_t *)a;
+ const pe_resource_t *resource2 = (const pe_resource_t *)b;
+
+ if (a == NULL && b == NULL) {
+ return 0;
+ }
+ if (a == NULL) {
+ return 1;
+ }
+ if (b == NULL) {
+ return -1;
+ }
+
+ if (resource1->priority > resource2->priority) {
+ return -1;
+ }
+
+ if (resource1->priority < resource2->priority) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
+ const char *tag)
+{
+ pe_node_t *match = NULL;
+
+ if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
+ && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
+ /* This string comparison may be fragile, but exclusive resources and
+ * exclusive nodes should not have the symmetric_default constraint
+ * applied to them.
+ */
+ return;
+
+ } else if (rsc->children) {
+ GList *gIter = rsc->children;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ resource_node_score(child_rsc, node, score, tag);
+ }
+ }
+
+ match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
+ if (match == NULL) {
+ match = pe__copy_node(node);
+ g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
+ }
+ match->weight = pcmk__add_scores(match->weight, score);
+ pe_rsc_trace(rsc,
+ "Enabling %s preference (%s) for %s on %s (now %s)",
+ tag, pcmk_readable_score(score), rsc->id, pe__node_name(node),
+ pcmk_readable_score(match->weight));
+}
+
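+/*!
+ * \brief Add a score to a resource's allowed node weights
+ *
+ * \param[in,out] rsc Resource (and any children) to update
+ * \param[in] node If not NULL, update only this node's weight
+ * \param[in] score Score to add
+ * \param[in] tag Text description for logs
+ * \param[in,out] data_set If not NULL and \p node is NULL, update the weight
+ * of each of its nodes, otherwise of each currently
+ * allowed node
+ *
+ * \note If \p node is NULL and \p score is -INFINITY, any current assignment
+ * of \p rsc is also cleared.
+ */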
+void
+resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
+ const char *tag, pe_working_set_t *data_set)
+{
+ if (node != NULL) {
+ resource_node_score(rsc, node, score, tag);
+
+ } else if (data_set != NULL) {
+ GList *gIter = data_set->nodes;
+
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_node_t *node_iter = (pe_node_t *) gIter->data;
+
+ resource_node_score(rsc, node_iter, score, tag);
+ }
+
+ } else {
+ GHashTableIter iter;
+ pe_node_t *node_iter = NULL;
+
+ g_hash_table_iter_init(&iter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
+ resource_node_score(rsc, node_iter, score, tag);
+ }
+ }
+
+ if (node == NULL && score == -INFINITY) {
+ if (rsc->allocated_to) {
+ crm_info("Deallocating %s from %s",
+ rsc->id, pe__node_name(rsc->allocated_to));
+ free(rsc->allocated_to);
+ rsc->allocated_to = NULL;
+ }
+ }
+}
+
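+/*!
+ * \brief Get the "current" time to use for scheduling evaluations
+ *
+ * \param[in,out] data_set Working set (its "now" is used, and created first
+ * if not already set)
+ *
+ * \return Seconds since epoch of the working set's "now" if \p data_set is
+ * not NULL, otherwise the actual current time
+ */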
+time_t
+get_effective_time(pe_working_set_t * data_set)
+{
+ if(data_set) {
+ if (data_set->now == NULL) {
+ crm_trace("Recording a new 'now'");
+ data_set->now = crm_time_new(NULL);
+ }
+ return crm_time_get_seconds_since_epoch(data_set->now);
+ }
+
+ crm_trace("Defaulting to 'now'");
+ return time(NULL);
+}
+
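+/*!
+ * \brief Get a resource's configured target role, if any
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] role Where to store the parsed target role
+ *
+ * \return TRUE if target-role is configured to a valid, non-default value,
+ * otherwise FALSE
+ */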
+gboolean
+get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
+{
+ enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
+ const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+
+ CRM_CHECK(role != NULL, return FALSE);
+
+ if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei)
+ || pcmk__str_eq("default", value, pcmk__str_casei)) {
+ return FALSE;
+ }
+
+ local_role = text2role(value);
+ if (local_role == RSC_ROLE_UNKNOWN) {
+ pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
+ "because '%s' is not valid", rsc->id, value);
+ return FALSE;
+
+ } else if (local_role > RSC_ROLE_STARTED) {
+ if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
+ pe_rsc_promotable)) {
+ if (local_role > RSC_ROLE_UNPROMOTED) {
+ /* This is what we'd do anyway, so just leave the default to
+ * avoid messing up the placement algorithm
+ */
+ return FALSE;
+ }
+
+ } else {
+ pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
+ "because '%s' only makes sense for promotable "
+ "clones", rsc->id, value);
+ return FALSE;
+ }
+ }
+
+ *role = local_role;
+ return TRUE;
+}
+
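+/*!
+ * \brief Create an ordering between two actions
+ *
+ * \param[in,out] lh_action Action that comes first
+ * \param[in,out] rh_action Action that comes after \p lh_action
+ * \param[in] order Type of ordering to create
+ *
+ * \return TRUE if a new ordering was created, or FALSE if the arguments were
+ * invalid or an equivalent ordering already existed
+ */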
+gboolean
+order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
+{
+ GList *gIter = NULL;
+ pe_action_wrapper_t *wrapper = NULL;
+ GList *list = NULL;
+
+ if (order == pe_order_none) {
+ return FALSE;
+ }
+
+ if (lh_action == NULL || rh_action == NULL) {
+ return FALSE;
+ }
+
+ crm_trace("Creating action wrappers for ordering: %s then %s",
+ lh_action->uuid, rh_action->uuid);
+
+ /* Ensure we never create a dependency on ourselves... it's happened */
+ CRM_ASSERT(lh_action != rh_action);
+
+ /* Filter dups, otherwise update_action_states() has too much work to do */
+ gIter = lh_action->actions_after;
+ for (; gIter != NULL; gIter = gIter->next) {
+ pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
+
+ if (after->action == rh_action && (after->type & order)) {
+ return FALSE;
+ }
+ }
+
+ wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper->action = rh_action;
+ wrapper->type = order;
+ list = lh_action->actions_after;
+ list = g_list_prepend(list, wrapper);
+ lh_action->actions_after = list;
+
+ wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper->action = lh_action;
+ wrapper->type = order;
+ list = rh_action->actions_before;
+ list = g_list_prepend(list, wrapper);
+ rh_action->actions_before = list;
+ return TRUE;
+}
+
+void
+destroy_ticket(gpointer data)
+{
+ pe_ticket_t *ticket = data;
+
+ if (ticket->state) {
+ g_hash_table_destroy(ticket->state);
+ }
+ free(ticket->id);
+ free(ticket);
+}
+
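+/*!
+ * \brief Find a ticket by ID in a working set, creating it if needed
+ *
+ * \param[in] ticket_id Ticket ID to find or create
+ * \param[in,out] data_set Cluster working set
+ *
+ * \return Ticket object, or NULL on error
+ */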
+pe_ticket_t *
+ticket_new(const char *ticket_id, pe_working_set_t * data_set)
+{
+ pe_ticket_t *ticket = NULL;
+
+ if (pcmk__str_empty(ticket_id)) {
+ return NULL;
+ }
+
+ if (data_set->tickets == NULL) {
+ data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ }
+
+ ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ if (ticket == NULL) {
+
+ ticket = calloc(1, sizeof(pe_ticket_t));
+ if (ticket == NULL) {
+ crm_err("Cannot allocate ticket '%s'", ticket_id);
+ return NULL;
+ }
+
+ crm_trace("Creaing ticket entry for %s", ticket_id);
+
+ ticket->id = strdup(ticket_id);
+ ticket->granted = FALSE;
+ ticket->last_granted = -1;
+ ticket->standby = FALSE;
+ ticket->state = pcmk__strkey_table(free, free);
+
+ g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
+ }
+
+ return ticket;
+}
+
+const char *
+rsc_printable_id(const pe_resource_t *rsc)
+{
+ return pcmk_is_set(rsc->flags, pe_rsc_unique)? rsc->id : ID(rsc->xml);
+}
+
+void
+pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+{
+ pe__clear_resource_flags(rsc, flags);
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ }
+}
+
+void
+pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
+{
+ for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
+ pe_resource_t *r = (pe_resource_t *) lpc->data;
+ pe__clear_resource_flags_recursive(r, flag);
+ }
+}
+
+void
+pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+{
+ pe__set_resource_flags(rsc, flags);
+ for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ }
+}
+
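+/*!
+ * \brief Schedule unfencing where appropriate
+ *
+ * Schedule an unfence ("on") action for a usable node, or if no node is
+ * given, for every usable node that a fence device is allowed on. A node is
+ * considered usable if it is online, clean, and not shutting down.
+ *
+ * \param[in,out] rsc If not NULL, do nothing unless this is a fence device
+ * \param[in,out] node If not NULL, the node to unfence
+ * \param[in] reason Text description of why unfencing is needed
+ * \param[in,out] dependency If not NULL, order it after the unfence action
+ * \param[in,out] data_set Cluster working set
+ */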
+void
+trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
+ pe_action_t *dependency, pe_working_set_t *data_set)
+{
+ if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ /* No resources require it */
+ return;
+
+ } else if ((rsc != NULL)
+ && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ /* Wasn't a stonith device */
+ return;
+
+ } else if(node
+ && node->details->online
+ && node->details->unclean == FALSE
+ && node->details->shutdown == FALSE) {
+ pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
+
+ if(dependency) {
+ order_actions(unfence, dependency, pe_order_optional);
+ }
+
+ } else if(rsc) {
+ GHashTableIter iter;
+
+ g_hash_table_iter_init(&iter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
+ if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
+ trigger_unfencing(rsc, node, reason, dependency, data_set);
+ }
+ }
+ }
+}
+
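+/*!
+ * \brief Add an object reference to a tag, creating the tag if needed
+ *
+ * \param[in,out] tags Tag table to add to
+ * \param[in] tag_name Name of the tag to add the reference to
+ * \param[in] obj_ref ID of the object to reference
+ *
+ * \return TRUE on success (including when the reference already existed),
+ * otherwise FALSE
+ */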
+gboolean
+add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
+{
+ pe_tag_t *tag = NULL;
+ GList *gIter = NULL;
+ gboolean is_existing = FALSE;
+
+ CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
+
+ tag = g_hash_table_lookup(tags, tag_name);
+ if (tag == NULL) {
+ tag = calloc(1, sizeof(pe_tag_t));
+ if (tag == NULL) {
+ return FALSE;
+ }
+ tag->id = strdup(tag_name);
+ tag->refs = NULL;
+ g_hash_table_insert(tags, strdup(tag_name), tag);
+ }
+
+ for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
+ const char *existing_ref = (const char *) gIter->data;
+
+ if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
+ is_existing = TRUE;
+ break;
+ }
+ }
+
+ if (is_existing == FALSE) {
+ tag->refs = g_list_append(tag->refs, strdup(obj_ref));
+ crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
+ }
+
+ return TRUE;
+}
+
+/*!
+ * \internal
+ * \brief Check whether shutdown has been requested for a node
+ *
+ * \param[in] node Node to check
+ *
+ * \return true if node has shutdown attribute set and nonzero, otherwise false
+ * \note This differs from simply using node->details->shutdown in that it can
+ * be used before that has been determined (and in fact to determine it),
+ * and it can also be used to distinguish requested shutdown from implicit
+ * shutdown of remote nodes by virtue of their connection stopping.
+ */
+bool
+pe__shutdown_requested(const pe_node_t *node)
+{
+ const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
+
+ return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
+}
+
+/*!
+ * \internal
+ * \brief Update a data set's "recheck by" time
+ *
+ * \param[in] recheck Epoch time when recheck should happen
+ * \param[in,out] data_set Current working set
+ */
+void
+pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
+{
+ if ((recheck > get_effective_time(data_set))
+ && ((data_set->recheck_by == 0)
+ || (data_set->recheck_by > recheck))) {
+ data_set->recheck_by = recheck;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Extract nvpair blocks contained by a CIB XML element into a hash table
+ *
+ * \param[in] xml_obj XML element containing blocks of nvpair elements
+ * \param[in] set_name If not NULL, only use blocks of this element
+ * \param[in] rule_data Matching parameters to use when unpacking
+ * \param[out] hash Where to store extracted name/value pairs
+ * \param[in] always_first If not NULL, process block with this ID first
+ * \param[in] overwrite Whether to replace existing values with same name
+ * \param[in,out] data_set Cluster working set containing \p xml_obj
+ */
+void
+pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
+ const pe_rule_eval_data_t *rule_data,
+ GHashTable *hash, const char *always_first,
+ gboolean overwrite, pe_working_set_t *data_set)
+{
+ crm_time_t *next_change = crm_time_new_undefined();
+
+ pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
+ always_first, overwrite, next_change);
+ if (crm_time_is_defined(next_change)) {
+ time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
+
+ pe__update_recheck_time(recheck, data_set);
+ }
+ crm_time_free(next_change);
+}
+
+bool
+pe__resource_is_disabled(const pe_resource_t *rsc)
+{
+ const char *target_role = NULL;
+
+ CRM_CHECK(rsc != NULL, return false);
+ target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
+ if (target_role) {
+ enum rsc_role_e target_role_e = text2role(target_role);
+
+ if ((target_role_e == RSC_ROLE_STOPPED)
+ || ((target_role_e == RSC_ROLE_UNPROMOTED)
+ && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
+ pe_rsc_promotable))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a resource is running only on given node
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p rsc is running only on \p node, otherwise false
+ */
+bool
+pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node)
+{
+ return (rsc != NULL) && pcmk__list_of_1(rsc->running_on)
+ && pe__same_node((const pe_node_t *) rsc->running_on->data, node);
+}
+
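+/*!
+ * \internal
+ * \brief Check whether a resource is running on any of certain nodes
+ *
+ * \param[in,out] rsc Resource to check
+ * \param[in] node_list List of node names to match against (entries may
+ * contain '*' wildcards)
+ *
+ * \return true if \p rsc is running on any node whose name matches an entry
+ * in \p node_list, otherwise false
+ */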
+bool
+pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
+{
+ for (GList *ele = rsc->running_on; ele; ele = ele->next) {
+ pe_node_t *node = (pe_node_t *) ele->data;
+ if (pcmk__str_in_list(node->details->uname, node_list,
+ pcmk__str_star_matches|pcmk__str_casei)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool
+pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node)
+{
+ return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node));
+}
+
+GList *
+pe__filter_rsc_list(GList *rscs, GList *filter)
+{
+ GList *retval = NULL;
+
+ for (GList *gIter = rscs; gIter; gIter = gIter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+
+ /* I think the second condition is safe here for all callers of this
+ * function. If not, it needs to move into pe__node_text.
+ */
+ if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) ||
+ (rsc->parent && pcmk__str_in_list(rsc_printable_id(rsc->parent), filter, pcmk__str_star_matches))) {
+ retval = g_list_prepend(retval, rsc);
+ }
+ }
+
+ return retval;
+}
+
+GList *
+pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
+ GList *nodes = NULL;
+
+ if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
+ /* Nothing was given, so return a list of all node names. ('*' is
+ * also caught here, because it would otherwise fall into the
+ * pe__unames_with_tag branch below and return an empty list.)
+ */
+ nodes = g_list_prepend(nodes, strdup("*"));
+ } else {
+ pe_node_t *node = pe_find_node(data_set->nodes, s);
+
+ if (node) {
+ /* The given string was a valid uname for a node. Return a
+ * singleton list containing just that uname.
+ */
+ nodes = g_list_prepend(nodes, strdup(s));
+ } else {
+ /* The given string was not a valid uname. It's either a tag or
+ * it's a typo or something. In the first case, we'll return a
+ * list of all the unames of the nodes with the given tag. In the
+ * second case, we'll return a NULL pointer and nothing will
+ * get displayed.
+ */
+ nodes = pe__unames_with_tag(data_set, s);
+ }
+ }
+
+ return nodes;
+}
+
+GList *
+pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
+ GList *resources = NULL;
+
+ if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
+ resources = g_list_prepend(resources, strdup("*"));
+ } else {
+ pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
+ pe_find_renamed|pe_find_any);
+
+ if (rsc) {
+ /* A colon in the name we were given means we're being asked to filter
+ * on a specific instance of a cloned resource. Put that exact string
+ * into the filter list. Otherwise, use the printable ID of whatever
+ * resource was found that matches what was asked for.
+ */
+ if (strstr(s, ":") != NULL) {
+ resources = g_list_prepend(resources, strdup(rsc->id));
+ } else {
+ resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
+ }
+ } else {
+ /* The given string was not a valid resource name. It's a tag or a
+ * typo or something. See pe__build_node_name_list() for more
+ * detail.
+ */
+ resources = pe__rscs_with_tag(data_set, s);
+ }
+ }
+
+ return resources;
+}
+
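+/*!
+ * \internal
+ * \brief Find a masked probe failure for a resource in the failed action list
+ *
+ * \param[in] rsc Resource to find a failed probe for
+ * \param[in] name If not NULL, consider only probes run on this node
+ *
+ * \return XML of the matching failed probe history entry, or NULL if none
+ */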
+xmlNode *
+pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name)
+{
+ const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ const char *rsc_id = rsc->id;
+
+ if (parent->variant == pe_clone) {
+ rsc_id = pe__clone_child_id(parent);
+ }
+
+ for (xmlNode *xml_op = pcmk__xml_first_child(rsc->cluster->failed); xml_op != NULL;
+ xml_op = pcmk__xml_next(xml_op)) {
+ const char *value = NULL;
+ char *op_id = NULL;
+
+ /* This resource operation is not a failed probe. */
+ if (!pcmk_xe_mask_probe_failure(xml_op)) {
+ continue;
+ }
+
+ /* This resource operation was not run on the given node. Note that if name is
+ * NULL, this will always succeed.
+ */
+ value = crm_element_value(xml_op, XML_LRM_ATTR_TARGET);
+ if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) {
+ continue;
+ }
+
+ if (!parse_op_key(pe__xe_history_key(xml_op), &op_id, NULL, NULL)) {
+ continue; // This history entry is missing an operation key
+ }
+
+ /* This resource operation's ID does not match the rsc_id we are looking for. */
+ if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) {
+ free(op_id);
+ continue;
+ }
+
+ free(op_id);
+ return xml_op;
+ }
+
+ return NULL;
+}
diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
new file mode 100644
index 0000000..daa3781
--- /dev/null
+++ b/lib/pengine/variant.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2004-2022 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PE_VARIANT__H
+# define PE_VARIANT__H
+
+# if PE__VARIANT_BUNDLE
+
+typedef struct {
+ int offset;
+ char *ipaddr;
+ pe_node_t *node;
+ pe_resource_t *ip;
+ pe_resource_t *child;
+ pe_resource_t *container;
+ pe_resource_t *remote;
+} pe__bundle_replica_t;
+
+enum pe__bundle_mount_flags {
+ pe__bundle_mount_none = 0x00,
+
+ // mount instance-specific subdirectory rather than source directly
+ pe__bundle_mount_subdir = 0x01
+};
+
+typedef struct {
+ char *source;
+ char *target;
+ char *options;
+ uint32_t flags; // bitmask of pe__bundle_mount_flags
+} pe__bundle_mount_t;
+
+typedef struct {
+ char *source;
+ char *target;
+} pe__bundle_port_t;
+
+enum pe__container_agent {
+ PE__CONTAINER_AGENT_UNKNOWN,
+ PE__CONTAINER_AGENT_DOCKER,
+ PE__CONTAINER_AGENT_RKT,
+ PE__CONTAINER_AGENT_PODMAN,
+};
+
+#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
+#define PE__CONTAINER_AGENT_DOCKER_S "docker"
+#define PE__CONTAINER_AGENT_RKT_S "rkt"
+#define PE__CONTAINER_AGENT_PODMAN_S "podman"
+
+typedef struct pe__bundle_variant_data_s {
+ int promoted_max;
+ int nreplicas;
+ int nreplicas_per_host;
+ char *prefix;
+ char *image;
+ const char *ip_last;
+ char *host_network;
+ char *host_netmask;
+ char *control_port;
+ char *container_network;
+ char *ip_range_start;
+ gboolean add_host;
+ gchar *container_host_options;
+ char *container_command;
+ char *launcher_options;
+ const char *attribute_target;
+
+ pe_resource_t *child;
+
+ GList *replicas; // pe__bundle_replica_t *
+ GList *ports; // pe__bundle_port_t *
+ GList *mounts; // pe__bundle_mount_t *
+
+ enum pe__container_agent agent_type;
+} pe__bundle_variant_data_t;
+
+# define get_bundle_variant_data(data, rsc) \
+ CRM_ASSERT(rsc != NULL); \
+ CRM_ASSERT(rsc->variant == pe_container); \
+ CRM_ASSERT(rsc->variant_opaque != NULL); \
+ data = (pe__bundle_variant_data_t *)rsc->variant_opaque; \
+
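+/* A minimal usage sketch of the accessor macro above (assuming a bundle
+ * resource object named rsc):
+ *
+ *     pe__bundle_variant_data_t *bundle_data = NULL;
+ *
+ *     get_bundle_variant_data(bundle_data, rsc);
+ *     crm_trace("Bundle %s has %d replicas",
+ *               rsc->id, bundle_data->nreplicas);
+ */
+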
+# endif
+
+#endif