Diffstat
-rw-r--r-- | tools/Makefile.am            | 171
-rw-r--r-- | tools/attrd_updater.c        |  16
-rw-r--r-- | tools/cibadmin.c             |  12
-rwxr-xr-x | tools/cluster-helper.in      |   2
-rwxr-xr-x | tools/cluster-init.in        | 537
-rw-r--r-- | tools/crm_attribute.c        |   4
-rw-r--r-- | tools/crm_diff.c             |  43
-rw-r--r-- | tools/crm_mon.c              |   6
-rw-r--r-- | tools/crm_mon.h              |   2
-rw-r--r-- | tools/crm_mon_curses.c       |   8
-rw-r--r-- | tools/crm_node.c             | 751
-rw-r--r-- | tools/crm_resource.c         | 339
-rw-r--r-- | tools/crm_resource.h         |  61
-rw-r--r-- | tools/crm_resource_ban.c     |  75
-rw-r--r-- | tools/crm_resource_print.c   |  96
-rw-r--r-- | tools/crm_resource_runtime.c | 540
-rw-r--r-- | tools/crm_shadow.c           |  49
-rw-r--r-- | tools/crm_simulate.c         |  31
-rw-r--r-- | tools/crm_ticket.c           |  74
-rw-r--r-- | tools/crm_verify.c           |  53
-rw-r--r-- | tools/stonith_admin.c        |  18
21 files changed, 1302 insertions(+), 1586 deletions(-)
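Several hunks below (attrd_updater.c, crm_node.c) replace pcmk_connect_ipc() with pcmk__connect_ipc(), which additionally takes an attempt count, and report failures using pcmk_ipc_name() and pcmk_rc_str(). The following is a minimal sketch of that connection pattern, assembled only from calls that appear in this diff; the attempt count of 5 and the error wording are taken from the hunks, while the exact headers and the surrounding function are illustrative assumptions, not part of the patch:

    #include <stdio.h>

    #include <crm_internal.h>       // assumed to provide the internal IPC declarations
    #include <crm/common/ipc.h>

    /* Sketch: create an IPC API object for the attribute manager, connect
     * synchronously with the new pcmk__connect_ipc() (dispatch type plus an
     * attempt count), and report failures the way the converted tools do.
     */
    static int
    connect_to_attrd_example(void)
    {
        pcmk_ipc_api_t *attrd_api = NULL;
        int rc = pcmk_new_ipc_api(&attrd_api, pcmk_ipc_attrd);

        if (rc != pcmk_rc_ok) {
            return rc;              // Could not create the IPC API object
        }

        rc = pcmk__connect_ipc(attrd_api, pcmk_ipc_dispatch_sync, 5);
        if (rc != pcmk_rc_ok) {
            fprintf(stderr, "Could not connect to %s: %s\n",
                    pcmk_ipc_name(attrd_api, true), pcmk_rc_str(rc));
        } else {
            /* ... send requests here, then disconnect ... */
            pcmk_disconnect_ipc(attrd_api);
        }

        pcmk_free_ipc_api(attrd_api);
        return rc;
    }

The same pattern recurs for the controller (pcmk_ipc_controld) connections in crm_node.c, where replies are delivered through a callback registered with pcmk_register_ipc_callback().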
diff --git a/tools/Makefile.am b/tools/Makefile.am index 36bd3ae..3efa938 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -13,53 +13,42 @@ if BUILD_SYSTEMD systemdsystemunit_DATA = crm_mon.service endif -noinst_HEADERS = crm_mon.h crm_resource.h +noinst_HEADERS = crm_mon.h \ + crm_resource.h pcmkdir = $(datadir)/$(PACKAGE) -pcmk_DATA = report.common report.collector +pcmk_DATA = report.common \ + report.collector -sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount +sbin_SCRIPTS = crm_report \ + crm_standby \ + crm_master \ + crm_failcount if BUILD_CIBSECRETS sbin_SCRIPTS += cibsecret endif noinst_SCRIPTS = cluster-clean \ - cluster-init \ cluster-helper \ pcmk_simtimes -EXTRA_DIST = attrd_updater.8.inc \ - cibadmin.8.inc \ - crm_attribute.8.inc \ - crm_diff.8.inc \ - crm_error.8.inc \ - crm_mon.8.inc \ - crm_node.8.inc \ - crm_resource.8.inc \ - crm_rule.8.inc \ - crm_shadow.8.inc \ - crm_simulate.8.inc \ - crm_ticket.8.inc \ - crm_verify.8.inc \ - crmadmin.8.inc \ - fix-manpages \ - iso8601.8.inc \ - stonith_admin.8.inc +EXTRA_DIST = $(wildcard *.inc) \ + fix-manpages sbin_PROGRAMS = attrd_updater \ - cibadmin \ - crmadmin \ - crm_simulate \ + cibadmin \ + crmadmin \ + crm_simulate \ crm_attribute \ - crm_diff \ - crm_error \ - crm_mon \ - crm_node \ - crm_resource \ - crm_rule \ - crm_shadow \ - crm_verify \ - crm_ticket \ - iso8601 \ + crm_diff \ + crm_error \ + crm_mon \ + crm_node \ + crm_resource \ + crm_rule \ + crm_shadow \ + crm_verify \ + crm_ticket \ + iso8601 \ stonith_admin ## SOURCES @@ -70,96 +59,96 @@ sbin_PROGRAMS = attrd_updater \ MAN8DEPS = crm_attribute crmadmin_SOURCES = crmadmin.c -crmadmin_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la +crmadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crmadmin_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crmadmin_LDADD += $(top_builddir)/lib/cib/libcib.la +crmadmin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_error_SOURCES = crm_error.c -crm_error_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_error_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_error_LDADD += $(top_builddir)/lib/common/libcrmcommon.la cibadmin_SOURCES = cibadmin.c -cibadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +cibadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +cibadmin_LDADD += $(top_builddir)/lib/cib/libcib.la +cibadmin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_shadow_SOURCES = crm_shadow.c -crm_shadow_LDADD = $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_shadow_LDADD = $(top_builddir)/lib/cib/libcib.la +crm_shadow_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_node_SOURCES = crm_node.c -crm_node_LDADD = $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_node_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_node_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_node_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_simulate_SOURCES = crm_simulate.c - -crm_simulate_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_simulate_LDADD = 
$(top_builddir)/lib/pacemaker/libpacemaker.la +crm_simulate_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_simulate_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_simulate_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_diff_SOURCES = crm_diff.c -crm_diff_LDADD = $(top_builddir)/lib/common/libcrmcommon.la +crm_diff_LDADD = $(top_builddir)/lib/common/libcrmcommon.la crm_mon_SOURCES = crm_mon.c crm_mon_curses.c -crm_mon_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/fencing/libstonithd.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la \ - $(CURSESLIBS) +crm_mon_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_mon_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_mon_LDADD += $(top_builddir)/lib/fencing/libstonithd.la +crm_mon_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_mon_LDADD += $(top_builddir)/lib/common/libcrmcommon.la +crm_mon_LDADD += $(CURSESLIBS) crm_verify_SOURCES = crm_verify.c -crm_verify_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_verify_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_verify_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_verify_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_verify_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_attribute_SOURCES = crm_attribute.c -crm_attribute_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_attribute_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_attribute_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_attribute_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_resource_SOURCES = crm_resource.c \ crm_resource_ban.c \ crm_resource_print.c \ crm_resource_runtime.c -crm_resource_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/fencing/libstonithd.la \ - $(top_builddir)/lib/lrmd/liblrmd.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_resource_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_resource_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_resource_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_resource_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la +crm_resource_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la +crm_resource_LDADD += $(top_builddir)/lib/fencing/libstonithd.la +crm_resource_LDADD += $(top_builddir)/lib/services/libcrmservice.la +crm_resource_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_rule_SOURCES = crm_rule.c -crm_rule_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_rule_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_rule_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_rule_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_rule_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la +crm_rule_LDADD += $(top_builddir)/lib/common/libcrmcommon.la iso8601_SOURCES = iso8601.c iso8601_LDADD = 
$(top_builddir)/lib/common/libcrmcommon.la attrd_updater_SOURCES = attrd_updater.c -attrd_updater_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/common/libcrmcommon.la +attrd_updater_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +attrd_updater_LDADD += $(top_builddir)/lib/common/libcrmcommon.la crm_ticket_SOURCES = crm_ticket.c -crm_ticket_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/common/libcrmcommon.la +crm_ticket_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +crm_ticket_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +crm_ticket_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la +crm_ticket_LDADD += $(top_builddir)/lib/cib/libcib.la +crm_ticket_LDADD += $(top_builddir)/lib/common/libcrmcommon.la stonith_admin_SOURCES = stonith_admin.c -stonith_admin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \ - $(top_builddir)/lib/cib/libcib.la \ - $(top_builddir)/lib/pengine/libpe_status.la \ - $(top_builddir)/lib/fencing/libstonithd.la \ - $(top_builddir)/lib/common/libcrmcommon.la +stonith_admin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la +stonith_admin_LDADD += $(top_builddir)/lib/pengine/libpe_status.la +stonith_admin_LDADD += $(top_builddir)/lib/cib/libcib.la +stonith_admin_LDADD += $(top_builddir)/lib/fencing/libstonithd.la +stonith_admin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la CLEANFILES = $(man8_MANS) diff --git a/tools/attrd_updater.c b/tools/attrd_updater.c index 60e4cc7..5f91356 100644 --- a/tools/attrd_updater.c +++ b/tools/attrd_updater.c @@ -145,8 +145,11 @@ static GOptionEntry required_entries[] = { static GOptionEntry command_entries[] = { { "update", 'U', 0, G_OPTION_ARG_CALLBACK, command_cb, - "Update attribute's value in pacemaker-attrd. If this causes the value\n" - INDENT "to change, it will also be updated in the cluster configuration.", + "Update attribute's value. 
Required: -n/--name or -P/--pattern.\n" + INDENT "Optional: -d/--delay (if specified, the delay will be used if\n" + INDENT "the attribute needs to be created, but ignored if the\n" + INDENT "attribute already exists), -s/--set, -p/--private, -W/--wait,\n" + INDENT "-z/--utilization.", "VALUE" }, { "update-both", 'B', 0, G_OPTION_ARG_CALLBACK, command_cb, @@ -446,10 +449,11 @@ send_attrd_query(pcmk__output_t *out, const char *attr_name, pcmk_register_ipc_callback(attrd_api, attrd_event_cb, out); // Connect to attrd (without main loop) - rc = pcmk_connect_ipc(attrd_api, pcmk_ipc_dispatch_sync); + rc = pcmk__connect_ipc(attrd_api, pcmk_ipc_dispatch_sync, 5); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, - "Could not connect to attrd: %s", pcmk_rc_str(rc)); + "Could not connect to %s: %s", + pcmk_ipc_name(attrd_api, true), pcmk_rc_str(rc)); pcmk_free_ipc_api(attrd_api); return rc; } @@ -463,7 +467,7 @@ send_attrd_query(pcmk__output_t *out, const char *attr_name, if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not query value of %s: %s (%d)", - attr_name, pcmk_strerror(rc), rc); + attr_name, pcmk_rc_str(rc), rc); } else if (!printed_values) { rc = pcmk_rc_schema_validation; g_set_error(&error, PCMK__RC_ERROR, rc, @@ -500,7 +504,7 @@ send_attrd_update(char command, const char *attr_node, const char *attr_name, case 'U': rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, attr_value, - NULL, attr_set, NULL, + attr_dampen, attr_set, NULL, attr_options | pcmk__node_attr_value); break; diff --git a/tools/cibadmin.c b/tools/cibadmin.c index f80afae..44488b5 100644 --- a/tools/cibadmin.c +++ b/tools/cibadmin.c @@ -72,8 +72,6 @@ void cibadmin_op_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, static void print_xml_output(xmlNode * xml) { - char *buffer; - if (!xml) { return; } else if (xml->type != XML_ELEMENT_NODE) { @@ -95,8 +93,8 @@ print_xml_output(xmlNode * xml) } } else { - buffer = dump_xml_formatted(xml); - fprintf(stdout, "%s", pcmk__s(buffer, "<null>\n")); + char *buffer = dump_xml_formatted(xml); + fprintf(stdout, "%s", buffer); free(buffer); } } @@ -574,7 +572,7 @@ main(int argc, char **argv) output = createEmptyCib(1); crm_xml_add(output, XML_ATTR_VALIDATION, options.validate_with); buf = dump_xml_formatted(output); - fprintf(stdout, "%s", pcmk__s(buf, "<null>\n")); + fprintf(stdout, "%s", buf); free(buf); goto done; } @@ -726,7 +724,7 @@ main(int argc, char **argv) goto done; } - if (strcmp(options.cib_action, "md5-sum") == 0) { + if (pcmk__str_eq(options.cib_action, "md5-sum", pcmk__str_casei)) { char *digest = NULL; if (input == NULL) { @@ -885,7 +883,7 @@ do_work(xmlNode *input, xmlNode **output) /* construct the request */ the_cib->call_timeout = options.message_timeout_sec; if ((strcmp(options.cib_action, PCMK__CIB_REQUEST_REPLACE) == 0) - && pcmk__str_eq(crm_element_name(input), XML_TAG_CIB, pcmk__str_casei)) { + && pcmk__xe_is(input, XML_TAG_CIB)) { xmlNode *status = pcmk_find_cib_element(input, XML_CIB_TAG_STATUS); if (status == NULL) { diff --git a/tools/cluster-helper.in b/tools/cluster-helper.in index d8dac6e..5bfe890 100755 --- a/tools/cluster-helper.in +++ b/tools/cluster-helper.in @@ -72,7 +72,7 @@ while true ; do -I) replace=$2; shift; shift;; --list|list) format=$2; command=list; shift; shift;; --add|add) command=group-add; shift;; - --create|create) group="$2", command=group-create; shift; shift;; + --create|create) group="$2"; command=group-create; shift; shift;; --run|run) command=run; shift;; --copy|copy) 
command=copy; shift; break ;; --key|key) command=key; shift; break ;; diff --git a/tools/cluster-init.in b/tools/cluster-init.in deleted file mode 100755 index 1485c81..0000000 --- a/tools/cluster-init.in +++ /dev/null @@ -1,537 +0,0 @@ -#!@BASH_PATH@ -# -# Copyright 2011-2023 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# or later (GPLv2+) WITHOUT ANY WARRANTY. -# - -accept_defaults=0 -do_raw=0 -ETCHOSTS=0 -nodelist=0 -limit=0 - -pkgs="corosync xinetd nmap abrt-cli fence-agents perl-TimeDate gdb" - -transport="multicast" -inaddr_any="no" - -INSTALL= -cs_conf= -fence_conf= - -dsh_group=0 -if [ ! -z $cluster_name ]; then - cluster=$cluster_name -else - cluster=dummy0 -fi - -# Corosync Settings -cs_port=666 - -# Settings that work great on nXX -join=60 -#token=3000 -consensus=1500 - -# Official settings -join=2000 -token=5000 -consensus=2500 - -# Testing -join=1000 -consensus=7500 -do_debug=off - -function ip_for_node() { - ping -c 1 $1 | grep "bytes from" | head -n 1 | sed -e 's/.*bytes from//' -e 's/: icmp.*//' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' -# if [ $do_raw = 1 ]; then -# echo $1 -# else -# #host $1 | grep "has address" | head -n 1 | awk '{print $NF}' | sed 's:(::' | sed 's:)::' -# fi -} -function id_for_node() { - ip_for_node $* | tr '.' ' ' | awk '{print $4}' -} -function name_for_node() { - echo $1 | awk -F. '{print $1}' -} - -function helptext() { - echo "cluster-init - Configure cluster communication for the infrastructures supported by Pacemaker" - echo "" - echo "-g, --group Specify the group to operate on/with" - echo "-w, --host Specify a host to operate on/with. May be specified multiple times" - echo "-r, --raw-ip Supplied nodes were listed as their IP addresses" - echo "" - echo "-c, --corosync configure for corosync" - echo "-C, --nodelist configure for corosync with a node list" - echo "-u, --unicast configure point-to-point communication instead of multicast" - echo "" - echo "-I, --install Install packages" - echo "" - echo "-d, --debug Enable debug logging for the cluster" - echo "--hosts Copy the local /etc/hosts file to all nodes" - echo "-e, --extra list Whitespace separated list of extra packages to install" - echo "-l, --limit N Use the first N hosts from the named group" - echo " Extra packages to install" - exit $1 -} - -host_input="" -while true; do - case "$1" in - -g) cluster=$2; - shift; shift;; - -w|--host) - for h in $2; do - host_input="$host_input -w $h"; - done - shift; shift;; - -w) host_input="$host_input -w $2" - shift; shift;; - -r|--raw-ip) do_raw=1; shift;; - - -d|--debug) do_debug=on; shift;; - - -I|--install) INSTALL=Yes; shift;; - --hosts) ETCHOSTS=1; shift;; - - -c|--corosync) CTYPE=corosync; shift;; - -C|--nodelist) CTYPE=corosync; nodelist=1; shift;; - -u|--unicast) nodelist=1; transport=udpu; inaddr_any="yes"; shift;; - -e|--extra) pkgs="$pkgs $2"; shift; shift;; - -t|--test) pkgs="$pkgs valgrind"; shift;; - -l|--limit) limit=$2; shift; shift;; - - r*[0-9]) - rhel=`echo $1 | sed -e s/rhel// -e s/-// -e s/r//` - pkgs="$pkgs qarsh-server"; - case $rhel in - 7) CTYPE=corosync;; - esac - shift - ;; - - f*[0-9][0-9]) - CTYPE=corosync; - shift - ;; - - -y|--yes|--defaults) accept_defaults=1; shift;; - -x) set -x; shift;; - -\?|--help) helptext 0; shift;; - "") break;; - *) echo "unknown option: $1"; exit 1;; - esac -done - -if [ ! 
-z $cluster ]; then - host_input="-g $cluster" - # use the last digit present in the variable (if any) - dsh_group=`echo $cluster | sed 's/[^0-9][^0-9]*//g;s/.*\([0-9]\)$/\1/'` -fi - -if [ -z $dsh_group ]; then - dsh_group=1 -fi - -if [ x = "x$host_input" -a x = "x$cluster" ]; then - if [ -d $HOME/.dsh/group ]; then - read -p "Please specify a dsh group you'd like to configure as a cluster? [] " -t 60 cluster - else - read -p "Please specify a whitespace delimetered list of nodes you'd like to configure as a cluster? [] " -t 60 host_list - - for h in $2; do - host_input="$host_input -w $h"; - done - fi -fi - -if [ -z "$host_input" ]; then - echo "You didn't specify any nodes or groups to configure" - exit 1 -fi - -if [ $limit -gt 0 ]; then - echo "Using only the first $limit hosts in $cluster group" - host_list=`cluster-helper --list bullet $host_input | head -n $limit | tr '\n*' ' '` -else - host_list=`cluster-helper --list short $host_input` -fi -num_hosts=`echo $host_list | wc -w` - -if [ $num_hosts -gt 9 ]; then - cs_port=66 -fi - -for h in $host_list; do - ping -c 1 -q $h - if [ $? != 0 ]; then - echo "Using long names..." - host_list=`cluster-helper --list long $host_input` - break - fi -done - -if [ -z $CTYPE ]; then - echo "" - read -p "Where should Pacemaker obtain membership and quorum from? [corosync] (corosync) " -t 60 CTYPE -fi - -case $CTYPE in - corosync) cs_conf="@PCMK__COROSYNC_CONF@" ;; -esac - -function get_defaults() -{ - if [ -z $SSH ]; then - SSH="No" - fi - - if [ -z $SELINUX ]; then - SELINUX="No" - fi - - if [ -z $IPTABLES ]; then - IPTABLES="Yes" - fi - - if [ -z $DOMAIN ]; then - DOMAIN="No" - fi - if [ -z $INSTALL ]; then - INSTALL="Yes" - fi - if [ -z $DATE ]; then - DATE="No" - fi -} - -get_defaults -if [ $accept_defaults = 0 ]; then - echo "" - read -p "Shall I install an ssh key to cluster nodes? [$SSH] " -t 60 SSH - echo "" - echo "SELinux prevent many things, including password-less ssh logins" - read -p "Shall I disable selinux? [$SELINUX] " -t 60 SELINUX - echo "" - echo "Incorrectly configured firewalls will prevent corosync from starting up" - read -p "Shall I disable iptables? [$IPTABLES] " -t 60 IPTABLES - - echo "" - read -p "Shall I install/update the relevant packages? [$INSTALL] " -t 60 INSTALL - - echo "" - read -p "Shall I sync the date/time? [$DATE] " -t 60 DATE -fi -get_defaults - -echo "" -echo "Detecting possible fencing options" -if [ -e /etc/cluster/fence_xvm.key ]; then - echo "* Found fence_xvm" - fence_conf=/etc/cluster/fence_xvm.key - pkgs="$pkgs fence-virt" -fi - -if [ ! -z ${OS_AUTH_URL} ]; then - echo "* Found openstack credentials" - fence_conf=/sbin/fence_openstack - pkgs="$pkgs python-novaclient" -fi -echo "" -echo "Beginning cluster configuration" -echo "" - -case $SSH in - [Yy][Ee][Ss]|[Yy]) - for host in $host_list; do - echo "Installing our ssh key on ${host}" - ssh-copy-id root@${host} >/dev/null 2>&1 - # Fix selinux labeling - ssh -l root ${host} -- restorecon -R -v . - done - ;; -esac - -case $DATE in - [Yy][Ee][Ss]|[Yy]) - for host in $host_list; do - echo "Setting time on ${host}" - scp /etc/localtime root@${host}:/etc - now=`date +%s` - ssh -l root ${host} -- date -s @$now - echo "" - done - ;; -esac - -init=`mktemp` -cat<<-END>$init -verbose=0 -pkgs="$pkgs" - -lhost=\`uname -n\` -lshort=\`echo \$lhost | awk -F. 
'{print \$1}'\` - -log() { - printf "%-10s \$*\n" "\$lshort:" 1>&2 -} - -debug() { - if [ \$verbose -gt 0 ]; then - log "Debug: \$*" - fi -} - -info() { - log "\$*" -} - -warning() { - log "WARN: \$*" -} - -fatal() { - log "ERROR: \$*" - exit 1 -} - -case $SELINUX in - [Yy][Ee][Ss]|[Yy]) - sed -i.sed "s/enforcing/disabled/g" /etc/selinux/config - ;; -esac - -case $IPTABLES in - [Yy][Ee][Ss]|[Yy]|"") - service iptables stop - chkconfig iptables off - service firewalld stop - chkconfig firewalld off - ;; -esac - -case $DOMAIN in - [Nn][Oo]|"") - ;; - *.*) - if - ! grep domain /etc/resolv.conf - then - sed -i.sed "s/nameserver/domain\ $DOMAIN\\\nnameserver/g" /etc/resolv.conf - fi - ;; - *) echo "Unknown domain: $DOMAIN";; -esac - -case $INSTALL in - [Yy][Ee][Ss]|[Yy]|"") - info Installing cluster software - yum install -y $pkgs pacemaker - ;; -esac - -info "Configuring services" -chkconfig xinetd on -service xinetd start &>/dev/null - -chkconfig corosync off &> /dev/null -mkdir -p /etc/cluster - -info "Turning on core files" -grep -q "unlimited" /etc/bashrc -if [ $? = 1 ]; then - sed -i.sed "s/bashrc/bashrc\\\nulimit\ -c\ unlimited/g" /etc/bashrc -fi - -function patch_cs_config() { - test $num_hosts != 2 - two_node=$? - - priority="info" - if [ $do_debug = 1 ]; then - priority="debug" - fi - - ssh -l root ${host} -- sed -i.sed "s/.*mcastaddr:.*/mcastaddr:\ 226.94.1.1/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/.*mcastport:.*/mcastport:\ $cs_port$dsh_group/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/.*bindnetaddr:.*/bindnetaddr:\ $ip/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/.*syslog_facility:.*/syslog_facility:\ daemon/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/.*logfile_priority:.*/logfile_priority:\ $priority/g" $cs_conf - - if [ ! -z $token ]; then - ssh -l root ${host} -- sed -i.sed "s/.*token:.*/token:\ $token/g" $cs_conf - fi - if [ ! -z $consensus ]; then - ssh -l root ${host} -- sed -i.sed "s/.*consensus:.*/consensus:\ $consensus/g" $cs_conf - fi - if [ ! -z $join ]; then - ssh -l root ${host} -- sed -i.sed "s/^join:.*/join:\ $join/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/\\\Wjoin:.*/join:\ $join/g" $cs_conf - fi - - ssh -l root ${host} -- grep -q "corosync_votequorum" $cs_conf 2>&1 > /dev/null - if [ $? -eq 0 ]; then - ssh -l root ${host} -- sed -i.sed "s/\\\Wexpected_votes:.*/expected_votes:\ $num_hosts/g" $cs_conf - ssh -l root ${host} -- sed -i.sed "s/\\\Wtwo_node:.*/two_node:\ $two_node/g" $cs_conf - else - printf "%-10s Wrong quorum provider: installing $cs_conf for corosync instead\n" ${host} - create_cs_config - fi -} - -function create_cs_config() { - cs_tmp=/tmp/cs_conf.$$ - test $num_hosts != 2 - two_node=$? - - # Base config - priority="info" - if [ $do_debug = 1 ]; then - priority="debug" - fi - - cat <<-END >$cs_tmp -# Please read the corosync.conf.5 manual page -totem { - version: 2 - - # cypto_cipher and crypto_hash: Used for mutual node authentication. - # If you choose to enable this, then do remember to create a shared - # secret with "corosync-keygen". - crypto_cipher: none - crypto_hash: none - - # Assign a fixed node id - nodeid: $id - - # Disable encryption - secauth: off - - transport: $transport - inaddr_any: $inaddr_any - - # interface: define at least one interface to communicate - # over. If you define more than one interface stanza, you must - # also set rrp_mode. - interface { - # Rings must be consecutively numbered, starting at 0. 
- ringnumber: 0 - - # This is normally the *network* address of the - # interface to bind to. This ensures that you can use - # identical instances of this configuration file - # across all your cluster nodes, without having to - # modify this option. - bindnetaddr: $ip - - # However, if you have multiple physical network - # interfaces configured for the same subnet, then the - # network address alone is not sufficient to identify - # the interface Corosync should bind to. In that case, - # configure the *host* address of the interface - # instead: - # bindnetaddr: 192.168.1.1 - # When selecting a multicast address, consider RFC - # 2365 (which, among other things, specifies that - # 239.255.x.x addresses are left to the discretion of - # the network administrator). Do not reuse multicast - # addresses across multiple Corosync clusters sharing - # the same network. - - # Corosync uses the port you specify here for UDP - # messaging, and also the immediately preceding - # port. Thus if you set this to 5405, Corosync sends - # messages over UDP ports 5405 and 5404. - mcastport: $cs_port$dsh_group - - # Time-to-live for cluster communication packets. The - # number of hops (routers) that this ring will allow - # itself to pass. Note that multicast routing must be - # specifically enabled on most network routers. - ttl: 1 - mcastaddr: 226.94.1.1 - } -} - -logging { - debug: off - fileline: off - to_syslog: yes - to_stderr: no - syslog_facility: daemon - timestamp: on - to_logfile: yes - logfile: /var/log/corosync.log - logfile_priority: $priority -} - -amf { - mode: disabled -} - -quorum { - provider: corosync_votequorum - expected_votes: $num_hosts - votes: 1 - two_node: $two_node - wait_for_all: 0 - last_man_standing: 0 - auto_tie_breaker: 0 -} -END - scp -q $cs_tmp root@${host}:$cs_conf - rm -f $cs_tmp -} - -for host in $host_list; do - echo "" - echo "" - echo "* Configuring $host" - - cs_short_host=`name_for_node $host` - ip=`ip_for_node $host` - id=`id_for_node $host` - - echo $ip | grep -qis NXDOMAIN - if [ $? = 0 ]; then - echo "Couldn't find resolve $host to an IP address" - exit 1 - fi - - if [ `uname -n` = $host ]; then - bash $init - else - cat $init | ssh -l root -T $host -- "cat > $init; bash $init" - fi - - if [ "x$fence_conf" != x ]; then - if [ -e $fence_conf ]; then - scp $fence_conf root@${host}:$fence_conf - fi - fi - - if [ $ETCHOSTS = 1 ]; then - scp /etc/hosts root@${host}:/etc/hosts - fi - - ssh -l root ${host} -- grep -q "token:" $cs_conf 2>&1 > /dev/null - new_config=$? - new_config=1 - - if [ $new_config = 0 ]; then - printf "%-10s Updating $cs_conf\n" ${host}: - patch_cs_config - else - printf "%-10s Installing $cs_conf\n" ${host}: - create_cs_config - fi -done diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c index 358b150..defe294 100644 --- a/tools/crm_attribute.c +++ b/tools/crm_attribute.c @@ -565,7 +565,7 @@ command_query(pcmk__output_t *out, cib_t *cib) } else if (rc != pcmk_rc_ok) { // Don't do anything. 
- } else if (xml_has_children(result)) { + } else if (result->children != NULL) { struct output_data_s od = { out, use_pattern, false }; pcmk__xe_foreach_child(result, NULL, output_one_attribute, &od); @@ -852,7 +852,7 @@ main(int argc, char **argv) } else if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - "Error performing operation: %s", pcmk_strerror(rc)); + "Error performing operation: %s", pcmk_rc_str(rc)); } done: diff --git a/tools/crm_diff.c b/tools/crm_diff.c index efe2fcf..9925ea7 100644 --- a/tools/crm_diff.c +++ b/tools/crm_diff.c @@ -108,7 +108,7 @@ print_patch(xmlNode *patch) { char *buffer = dump_xml_formatted(patch); - printf("%s", pcmk__s(buffer, "<null>\n")); + printf("%s", buffer); free(buffer); fflush(stdout); } @@ -152,7 +152,7 @@ log_patch_cib_versions(xmlNode *patch) const char *digest = NULL; xml_patch_versions(patch, add, del); - fmt = crm_element_value(patch, "format"); + fmt = crm_element_value(patch, PCMK_XA_FORMAT); digest = crm_element_value(patch, XML_ATTR_DIGEST); if (add[2] != del[2] || add[1] != del[1] || add[0] != del[0]) { @@ -166,7 +166,7 @@ strip_patch_cib_version(xmlNode *patch, const char **vfields, size_t nvfields) { int format = 1; - crm_element_value_int(patch, "format", &format); + crm_element_value_int(patch, PCMK_XA_FORMAT, &format); if (format == 2) { xmlNode *version_xml = find_xml_node(patch, "version", FALSE); @@ -208,21 +208,13 @@ static int generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2, gboolean as_cib, gboolean no_version) { - xmlNode *output = NULL; - int rc = pcmk_rc_ok; - - pcmk__output_t *logger_out = NULL; - int out_rc = pcmk_rc_no_output; - int temp_rc = pcmk_rc_no_output; - const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; - rc = pcmk__log_output_new(&logger_out); - CRM_CHECK(rc == pcmk_rc_ok, return rc); + xmlNode *output = NULL; /* If we're ignoring the version, make the version information * identical, so it isn't detected as a change. */ @@ -244,21 +236,13 @@ generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2, output = xml_create_patchset(0, object_1, object_2, NULL, FALSE); - pcmk__output_set_log_level(logger_out, LOG_INFO); - out_rc = pcmk__xml_show_changes(logger_out, object_2); - + pcmk__log_xml_changes(LOG_INFO, object_2); xml_accept_changes(object_2); if (output == NULL) { - goto done; // rc == pcmk_rc_ok + return pcmk_rc_ok; // No changes } - /* pcmk_rc_error means there's non-empty diff. - * @COMPAT: Choose a more descriptive return code, like one that maps to - * CRM_EX_DIGEST? - */ - rc = pcmk_rc_error; - patchset_process_digest(output, object_1, object_2, as_cib); if (as_cib) { @@ -268,18 +252,15 @@ generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2, strip_patch_cib_version(output, vfields, PCMK__NELEM(vfields)); } - pcmk__output_set_log_level(logger_out, LOG_NOTICE); - temp_rc = logger_out->message(logger_out, "xml-patchset", output); - out_rc = pcmk__output_select_rc(out_rc, temp_rc); - + pcmk__log_xml_patchset(LOG_NOTICE, output); print_patch(output); free_xml(output); -done: - logger_out->finish(logger_out, pcmk_rc2exitc(out_rc), true, NULL); - pcmk__output_free(logger_out); - - return rc; + /* pcmk_rc_error means there's a non-empty diff. + * @COMPAT Choose a more descriptive return code, like one that maps to + * CRM_EX_DIGEST? 
+ */ + return pcmk_rc_error; } static GOptionContext * diff --git a/tools/crm_mon.c b/tools/crm_mon.c index c20766c..dbe76fc 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -1780,7 +1780,7 @@ send_custom_trap(const char *node, const char *rsc, const char *task, int target pid = fork(); if (pid == -1) { - crm_perror(LOG_ERR, "notification fork() failed."); + out->err(out, "notification fork() failed: %s", strerror(errno)); } if (pid == 0) { /* crm_debug("notification: I am the child. Executing the nofitication program."); */ @@ -1840,7 +1840,7 @@ handle_rsc_op(xmlNode *xml, void *userdata) node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET); - while (n != NULL && !pcmk__str_eq(XML_CIB_TAG_STATE, TYPE(n), pcmk__str_casei)) { + while ((n != NULL) && !pcmk__xe_is(n, XML_CIB_TAG_STATE)) { n = n->parent; } @@ -2051,7 +2051,7 @@ crm_diff_update(const char *event, xmlNode * msg) if (options.external_agent) { int format = 0; - crm_element_value_int(diff, "format", &format); + crm_element_value_int(diff, PCMK_XA_FORMAT, &format); switch(format) { case 1: crm_diff_update_v1(event, msg); diff --git a/tools/crm_mon.h b/tools/crm_mon.h index a505f50..c87432d 100644 --- a/tools/crm_mon.h +++ b/tools/crm_mon.h @@ -14,8 +14,8 @@ #include <glib.h> +#include <crm/common/scheduler.h> #include <crm/common/output_internal.h> -#include <crm/pengine/pe_types.h> #include <crm/stonith-ng.h> /* diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c index 769c7c9..212a400 100644 --- a/tools/crm_mon_curses.c +++ b/tools/crm_mon_curses.c @@ -1,5 +1,5 @@ /* - * Copyright 2019-2022 the Pacemaker project contributors + * Copyright 2019-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -425,11 +425,11 @@ static int cluster_maint_mode_console(pcmk__output_t *out, va_list args) { unsigned long long flags = va_arg(args, unsigned long long); - if (pcmk_is_set(flags, pe_flag_maintenance_mode)) { + if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) { curses_formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); curses_formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n"); return pcmk_rc_ok; - } else if (pcmk_is_set(flags, pe_flag_stop_everything)) { + } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) { curses_formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); curses_formatted_printf(out, " The cluster will keep all resources stopped\n"); return pcmk_rc_ok; @@ -438,7 +438,7 @@ cluster_maint_mode_console(pcmk__output_t *out, va_list args) { } } -PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", +PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") diff --git a/tools/crm_node.c b/tools/crm_node.c index ac2a190..1e7ce6c 100644 --- a/tools/crm_node.c +++ b/tools/crm_node.c @@ -9,6 +9,7 @@ #include <crm_internal.h> +#include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> @@ -24,6 +25,8 @@ #include <crm/common/ipc_controld.h> #include <crm/common/attrd_internal.h> +#include <pacemaker-internal.h> + #define SUMMARY "crm_node - Tool for displaying low-level node information" struct { @@ -42,8 +45,10 @@ gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data gboolean name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean 
remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); +static GError *error = NULL; static GMainLoop *mainloop = NULL; static crm_exit_t exit_code = CRM_EX_OK; +static pcmk__output_t *out = NULL; #define INDENT " " @@ -91,6 +96,13 @@ static GOptionEntry addl_entries[] = { { NULL } }; +static pcmk__supported_format_t formats[] = { + PCMK__SUPPORTED_FORMAT_NONE, + PCMK__SUPPORTED_FORMAT_TEXT, + PCMK__SUPPORTED_FORMAT_XML, + { NULL, NULL, NULL } +}; + gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_eq("-i", option_name, pcmk__str_casei) || pcmk__str_eq("--cluster-id", option_name, pcmk__str_casei)) { @@ -104,7 +116,7 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError } else if (pcmk__str_eq("-q", option_name, pcmk__str_casei) || pcmk__str_eq("--quorum", option_name, pcmk__str_casei)) { options.command = 'q'; } else { - g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Unknown param passed to command_cb: %s\n", option_name); + g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Unknown param passed to command_cb: %s", option_name); return FALSE; } @@ -121,7 +133,6 @@ name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **e gboolean remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (optarg == NULL) { - crm_err("-R option requires an argument"); g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "-R option requires an argument"); return FALSE; } @@ -132,6 +143,184 @@ remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError * return TRUE; } +PCMK__OUTPUT_ARGS("node-id", "uint32_t") +static int +node_id_default(pcmk__output_t *out, va_list args) { + uint32_t node_id = va_arg(args, uint32_t); + + out->info(out, "%" PRIu32, node_id); + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("node-id", "uint32_t") +static int +node_id_xml(pcmk__output_t *out, va_list args) { + uint32_t node_id = va_arg(args, uint32_t); + + char *id_s = crm_strdup_printf("%" PRIu32, node_id); + + pcmk__output_create_xml_node(out, "node-info", + "nodeid", id_s, + NULL); + + free(id_s); + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("node-list", "GList *") +static int +node_list_default(pcmk__output_t *out, va_list args) +{ + GList *nodes = va_arg(args, GList *); + + for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) { + pcmk_controld_api_node_t *node = node_iter->data; + out->info(out, "%" PRIu32 " %s %s", node->id, pcmk__s(node->uname, ""), + pcmk__s(node->state, "")); + } + + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("node-list", "GList *") +static int +node_list_xml(pcmk__output_t *out, va_list args) +{ + GList *nodes = va_arg(args, GList *); + + out->begin_list(out, NULL, NULL, "nodes"); + + for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) { + pcmk_controld_api_node_t *node = node_iter->data; + char *id_s = crm_strdup_printf("%" PRIu32, node->id); + + pcmk__output_create_xml_node(out, "node", + "id", id_s, + "name", node->uname, + "state", node->state, + NULL); + + free(id_s); + } + + out->end_list(out); + + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *") +static int +node_name_default(pcmk__output_t *out, va_list args) { + uint32_t node_id G_GNUC_UNUSED = va_arg(args, uint32_t); + const char *node_name = va_arg(args, const char *); + + out->info(out, "%s", node_name); + return pcmk_rc_ok; 
+} + +PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *") +static int +node_name_xml(pcmk__output_t *out, va_list args) { + uint32_t node_id = va_arg(args, uint32_t); + const char *node_name = va_arg(args, const char *); + + char *id_s = crm_strdup_printf("%" PRIu32, node_id); + + pcmk__output_create_xml_node(out, "node-info", + "nodeid", id_s, + XML_ATTR_UNAME, node_name, + NULL); + + free(id_s); + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("partition-list", "GList *") +static int +partition_list_default(pcmk__output_t *out, va_list args) +{ + GList *nodes = va_arg(args, GList *); + + GString *buffer = NULL; + + for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) { + pcmk_controld_api_node_t *node = node_iter->data; + if (pcmk__str_eq(node->state, "member", pcmk__str_none)) { + pcmk__add_separated_word(&buffer, 128, pcmk__s(node->uname, ""), " "); + } + } + + if (buffer != NULL) { + out->info(out, "%s", buffer->str); + g_string_free(buffer, TRUE); + return pcmk_rc_ok; + } + + return pcmk_rc_no_output; +} + +PCMK__OUTPUT_ARGS("partition-list", "GList *") +static int +partition_list_xml(pcmk__output_t *out, va_list args) +{ + GList *nodes = va_arg(args, GList *); + + out->begin_list(out, NULL, NULL, "nodes"); + + for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) { + pcmk_controld_api_node_t *node = node_iter->data; + + if (pcmk__str_eq(node->state, "member", pcmk__str_none)) { + char *id_s = crm_strdup_printf("%" PRIu32, node->id); + + pcmk__output_create_xml_node(out, "node", + "id", id_s, + "name", node->uname, + "state", node->state, + NULL); + free(id_s); + } + } + + out->end_list(out); + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("quorum", "bool") +static int +quorum_default(pcmk__output_t *out, va_list args) { + bool have_quorum = va_arg(args, int); + + out->info(out, "%d", have_quorum); + return pcmk_rc_ok; +} + +PCMK__OUTPUT_ARGS("quorum", "bool") +static int +quorum_xml(pcmk__output_t *out, va_list args) { + bool have_quorum = va_arg(args, int); + + pcmk__output_create_xml_node(out, "cluster-info", + "quorum", have_quorum ? 
"true" : "false", + NULL); + return pcmk_rc_ok; +} + +static pcmk__message_entry_t fmt_functions[] = { + { "node-id", "default", node_id_default }, + { "node-id", "xml", node_id_xml }, + { "node-list", "default", node_list_default }, + { "node-list", "xml", node_list_xml }, + { "node-name", "default", node_name_default }, + { "node-name", "xml", node_name_xml }, + { "quorum", "default", quorum_default }, + { "quorum", "xml", quorum_xml }, + { "partition-list", "default", partition_list_default }, + { "partition-list", "xml", partition_list_xml }, + + { NULL, NULL, NULL } +}; + static gint sort_node(gconstpointer a, gconstpointer b) { @@ -152,7 +341,8 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - fprintf(stderr, "error: Lost connection to controller\n"); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Lost connection to controller"); } goto done; break; @@ -165,93 +355,26 @@ controller_event_cb(pcmk_ipc_api_t *controld_api, } if (status != CRM_EX_OK) { - fprintf(stderr, "error: Bad reply from controller: %s\n", - crm_exit_str(status)); + exit_code = status; + g_set_error(&error, PCMK__EXITC_ERROR, status, + "Bad reply from controller: %s", + crm_exit_str(status)); goto done; } - // Parse desired info from reply and display to user - switch (options.command) { - case 'i': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - if (reply->data.node_info.id == 0) { - fprintf(stderr, - "error: Controller reply did not contain node ID\n"); - exit_code = CRM_EX_PROTOCOL; - goto done; - } - printf("%d\n", reply->data.node_info.id); - break; - - case 'n': - case 'N': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - if (reply->data.node_info.uname == NULL) { - fprintf(stderr, "Node is not known to cluster\n"); - exit_code = CRM_EX_NOHOST; - goto done; - } - printf("%s\n", reply->data.node_info.uname); - break; - - case 'q': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - printf("%d\n", reply->data.node_info.have_quorum); - if (!(reply->data.node_info.have_quorum)) { - exit_code = CRM_EX_QUORUM; - goto done; - } - break; + if (reply->reply_type != pcmk_controld_reply_nodes) { + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_INDETERMINATE, + "Unknown reply type %d from controller", + reply->reply_type); + goto done; + } - case 'l': - case 'p': - if (reply->reply_type != pcmk_controld_reply_nodes) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - reply->data.nodes = g_list_sort(reply->data.nodes, sort_node); - for (GList *node_iter = reply->data.nodes; - node_iter != NULL; node_iter = node_iter->next) { - - pcmk_controld_api_node_t *node = node_iter->data; - const char *uname = (node->uname? node->uname : ""); - const char *state = (node->state? node->state : ""); - - if (options.command == 'l') { - printf("%lu %s %s\n", - (unsigned long) node->id, uname, state); - - // i.e. 
CRM_NODE_MEMBER, but we don't want to include cluster.h - } else if (!strcmp(state, "member")) { - printf("%s ", uname); - } - } - if (options.command == 'p') { - printf("\n"); - } - break; + reply->data.nodes = g_list_sort(reply->data.nodes, sort_node); - default: - fprintf(stderr, "internal error: Controller reply not expected\n"); - exit_code = CRM_EX_SOFTWARE; - goto done; + if (options.command == 'p') { + out->message(out, "partition-list", reply->data.nodes); + } else if (options.command == 'l') { + out->message(out, "node-list", reply->data.nodes); } // Success @@ -262,7 +385,7 @@ done: } static void -run_controller_mainloop(uint32_t nodeid, bool list_nodes) +run_controller_mainloop(void) { pcmk_ipc_api_t *controld_api = NULL; int rc; @@ -273,31 +396,30 @@ run_controller_mainloop(uint32_t nodeid, bool list_nodes) // Create controller IPC object rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); + g_set_error(&error, PCMK__RC_ERROR, rc, + "Could not connect to controller: %s", + pcmk_rc_str(rc)); return; } pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL); // Connect to controller - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); + rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5); if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Could not connect to %s: %s", + pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc)); return; } - if (list_nodes) { - rc = pcmk_controld_api_list_nodes(controld_api); - } else { - rc = pcmk_controld_api_node_info(controld_api, nodeid); - } + rc = pcmk_controld_api_list_nodes(controld_api); + if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not ping controller: %s\n", - pcmk_rc_str(rc)); pcmk_disconnect_ipc(controld_api); exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Could not ping controller: %s", pcmk_rc_str(rc)); return; } @@ -310,169 +432,295 @@ run_controller_mainloop(uint32_t nodeid, bool list_nodes) } static void -print_node_name(void) +print_node_id(void) { - // Check environment first (i.e. when called by resource agent) - const char *name = getenv("OCF_RESKEY_" CRM_META "_" XML_LRM_ATTR_TARGET); + uint32_t nodeid; + int rc = pcmk__query_node_info(out, &nodeid, NULL, NULL, NULL, NULL, NULL, + false, 0); - if (name != NULL) { - printf("%s\n", name); - exit_code = CRM_EX_OK; + if (rc != pcmk_rc_ok) { + /* pcmk__query_node_info already sets an error message on the output object, + * so there's no need to call g_set_error here. That would just create a + * duplicate error message in the output. + */ + exit_code = pcmk_rc2exitc(rc); return; + } - } else { - /* Otherwise ask the controller. - * FIXME: Use pcmk__query_node_name() after conversion to formatted - * output. - */ - run_controller_mainloop(0, false); + rc = out->message(out, "node-id", nodeid); + + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node ID: %s", + pcmk_rc_str(rc)); } + + exit_code = pcmk_rc2exitc(rc); } -static int -cib_remove_node(long id, const char *name) +static void +print_node_name(uint32_t nodeid) { - int rc; - cib_t *cib = NULL; - xmlNode *node = NULL; - xmlNode *node_state = NULL; + int rc = pcmk_rc_ok; + char *node_name = NULL; + + if (nodeid == 0) { + // Check environment first (i.e. 
when called by resource agent) + const char *name = getenv("OCF_RESKEY_" CRM_META "_" XML_LRM_ATTR_TARGET); + + if (name != NULL) { + rc = out->message(out, "node-name", 0, name); + goto done; + } + } - crm_trace("Removing %s from the CIB", name); + // Otherwise ask the controller - if(name == NULL && id == 0) { - return -ENOTUNIQ; + /* pcmk__query_node_name already sets an error message on the output object, + * so there's no need to call g_set_error here. That would just create a + * duplicate error message in the output. + */ + rc = pcmk__query_node_name(out, nodeid, &node_name, 0); + if (rc != pcmk_rc_ok) { + exit_code = pcmk_rc2exitc(rc); + return; } - node = create_xml_node(NULL, XML_CIB_TAG_NODE); - node_state = create_xml_node(NULL, XML_CIB_TAG_STATE); + rc = out->message(out, "node-name", 0, node_name); - crm_xml_add(node, XML_ATTR_UNAME, name); - crm_xml_add(node_state, XML_ATTR_UNAME, name); - if (id > 0) { - crm_xml_set_id(node, "%ld", id); - crm_xml_add(node_state, XML_ATTR_ID, ID(node)); +done: + if (node_name != NULL) { + free(node_name); } - cib = cib_new(); - cib->cmds->signon(cib, crm_system_name, cib_command); + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node name: %s", + pcmk_rc_str(rc)); + } - rc = cib->cmds->remove(cib, XML_CIB_TAG_NODES, node, cib_sync_call); - if (rc != pcmk_ok) { - printf("Could not remove %s[%ld] from " XML_CIB_TAG_NODES ": %s", - name, id, pcmk_strerror(rc)); + exit_code = pcmk_rc2exitc(rc); +} + +static void +print_quorum(void) +{ + bool quorum; + int rc = pcmk__query_node_info(out, NULL, NULL, NULL, NULL, &quorum, NULL, + false, 0); + + if (rc != pcmk_rc_ok) { + /* pcmk__query_node_info already sets an error message on the output object, + * so there's no need to call g_set_error here. That would just create a + * duplicate error message in the output. + */ + exit_code = pcmk_rc2exitc(rc); + return; + } + + rc = out->message(out, "quorum", quorum); + + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print quorum status: %s", + pcmk_rc_str(rc)); + } + + exit_code = pcmk_rc2exitc(rc); +} + +/*! + * \internal + * \brief Extend a transaction by removing a node from a CIB section + * + * \param[in,out] cib Active CIB connection + * \param[in] element CIB element containing node name and/or ID + * \param[in] section CIB section that \p element is in + * \param[in] node_name Name of node to purge (NULL to leave unspecified) + * \param[in] node_id Node ID of node to purge (0 to leave unspecified) + * + * \note At least one of node_name and node_id must be specified. + * \return Standard Pacemaker return code + */ +static int +remove_from_section(cib_t *cib, const char *element, const char *section, + const char *node_name, long node_id) +{ + xmlNode *xml = NULL; + int rc = pcmk_rc_ok; + + xml = create_xml_node(NULL, element); + if (xml == NULL) { + return pcmk_rc_error; + } + crm_xml_add(xml, XML_ATTR_UNAME, node_name); + if (node_id > 0) { + crm_xml_set_id(xml, "%ld", node_id); + } + rc = cib->cmds->remove(cib, section, xml, cib_transaction); + free_xml(xml); + return (rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(rc); +} + +/*! + * \internal + * \brief Purge a node from CIB + * + * \param[in] node_name Name of node to purge (or NULL to leave unspecified) + * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified) + * + * \note At least one of node_name and node_id must be specified. 
+ * \return Standard Pacemaker return code + */ +static int +purge_node_from_cib(const char *node_name, long node_id) +{ + int rc = pcmk_rc_ok; + int commit_rc = pcmk_rc_ok; + cib_t *cib = NULL; + + // Connect to CIB and start a transaction + cib = cib_new(); + if (cib == NULL) { + return ENOTCONN; + } + rc = cib->cmds->signon(cib, crm_system_name, cib_command); + if (rc == pcmk_ok) { + rc = cib->cmds->init_transaction(cib); } - rc = cib->cmds->remove(cib, XML_CIB_TAG_STATUS, node_state, cib_sync_call); if (rc != pcmk_ok) { - printf("Could not remove %s[%ld] from " XML_CIB_TAG_STATUS ": %s", - name, id, pcmk_strerror(rc)); + rc = pcmk_legacy2rc(rc); + cib__clean_up_connection(&cib); + return rc; + } + + // Remove from configuration and status + rc = remove_from_section(cib, XML_CIB_TAG_NODE, XML_CIB_TAG_NODES, + node_name, node_id); + if (rc == pcmk_rc_ok) { + rc = remove_from_section(cib, XML_CIB_TAG_STATE, XML_CIB_TAG_STATUS, + node_name, node_id); } + // Commit the transaction + commit_rc = cib->cmds->end_transaction(cib, (rc == pcmk_rc_ok), + cib_sync_call); cib__clean_up_connection(&cib); + + if ((rc == pcmk_rc_ok) && (commit_rc == pcmk_ok)) { + crm_debug("Purged node %s (%ld) from CIB", + pcmk__s(node_name, "by ID"), node_id); + } return rc; } +/*! + * \internal + * \brief Purge a node from a single server's peer cache + * + * \param[in] server IPC server to send request to + * \param[in] node_name Name of node to purge (or NULL to leave unspecified) + * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified) + * + * \note At least one of node_name and node_id must be specified. + * \return Standard Pacemaker return code + */ static int -controller_remove_node(const char *node_name, long nodeid) +purge_node_from(enum pcmk_ipc_server server, const char *node_name, + long node_id) { - pcmk_ipc_api_t *controld_api = NULL; + pcmk_ipc_api_t *api = NULL; int rc; - // Create controller IPC object - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); + rc = pcmk_new_ipc_api(&api, server); if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - return ENOTCONN; + goto done; } - // Connect to controller (without main loop) - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_sync); + rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5); if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - pcmk_free_ipc_api(controld_api); - return rc; + goto done; } - rc = pcmk_ipc_purge_node(controld_api, node_name, nodeid); - if (rc != pcmk_rc_ok) { - fprintf(stderr, - "error: Could not clear node from controller's cache: %s\n", - pcmk_rc_str(rc)); + rc = pcmk_ipc_purge_node(api, node_name, node_id); +done: + if (rc != pcmk_rc_ok) { // Debug message already logged on success + g_set_error(&error, PCMK__RC_ERROR, rc, + "Could not purge node %s from %s: %s", + pcmk__s(node_name, "by ID"), pcmk_ipc_name(api, true), + pcmk_rc_str(rc)); } - - pcmk_free_ipc_api(controld_api); - return pcmk_rc_ok; + pcmk_free_ipc_api(api); + return rc; } +/*! + * \internal + * \brief Purge a node from the fencer's peer cache + * + * \param[in] node_name Name of node to purge (or NULL to leave unspecified) + * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified) + * + * \note At least one of node_name and node_id must be specified. 
+ * \return Standard Pacemaker return code + */ static int -tools_remove_node_cache(const char *node_name, long nodeid, const char *target) +purge_node_from_fencer(const char *node_name, long node_id) { - int rc = -1; + int rc = pcmk_rc_ok; crm_ipc_t *conn = NULL; xmlNode *cmd = NULL; - conn = crm_ipc_new(target, 0); - if (!conn) { - return -ENOTCONN; + conn = crm_ipc_new("stonith-ng", 0); + if (conn == NULL) { + rc = ENOTCONN; + exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Could not connect to fencer to purge node %s", + pcmk__s(node_name, "by ID")); + return rc; } - if (!crm_ipc_connect(conn)) { - crm_perror(LOG_ERR, "Connection to %s failed", target); + + rc = pcmk__connect_generic_ipc(conn); + if (rc != pcmk_rc_ok) { + exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Could not connect to fencer to purge node %s: %s", + pcmk__s(node_name, "by ID"), pcmk_rc_str(rc)); crm_ipc_destroy(conn); - return -ENOTCONN; + return rc; } - crm_trace("Removing %s[%ld] from the %s membership cache", - node_name, nodeid, target); - - if(pcmk__str_eq(target, T_ATTRD, pcmk__str_casei)) { - cmd = create_xml_node(NULL, __func__); - - crm_xml_add(cmd, F_TYPE, T_ATTRD); - crm_xml_add(cmd, F_ORIG, crm_system_name); - - crm_xml_add(cmd, PCMK__XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE); - - pcmk__xe_add_node(cmd, node_name, nodeid); - - } else { // Fencer or pacemakerd - cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, target, - crm_system_name, NULL); - if (nodeid > 0) { - crm_xml_set_id(cmd, "%ld", nodeid); - } - crm_xml_add(cmd, XML_ATTR_UNAME, node_name); + cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, "stonith-ng", + crm_system_name, NULL); + if (node_id > 0) { + crm_xml_set_id(cmd, "%ld", node_id); } + crm_xml_add(cmd, XML_ATTR_UNAME, node_name); rc = crm_ipc_send(conn, cmd, 0, 0, NULL); - crm_debug("%s peer cache cleanup for %s (%ld): %d", - target, node_name, nodeid, rc); - - if (rc > 0) { - // @TODO Should this be done just once after all the rest? - rc = cib_remove_node(nodeid, node_name); - } - - if (conn) { - crm_ipc_close(conn); - crm_ipc_destroy(conn); + if (rc >= 0) { + rc = pcmk_rc_ok; + crm_debug("Purged node %s (%ld) from fencer", + pcmk__s(node_name, "by ID"), node_id); + } else { + rc = pcmk_legacy2rc(rc); + fprintf(stderr, "Could not purge node %s from fencer: %s\n", + pcmk__s(node_name, "by ID"), pcmk_rc_str(rc)); } free_xml(cmd); - return rc > 0 ? 
0 : rc; + crm_ipc_close(conn); + crm_ipc_destroy(conn); + return rc; } static void remove_node(const char *target_uname) { - int rc; - int d = 0; + int rc = pcmk_rc_ok; long nodeid = 0; const char *node_name = NULL; char *endptr = NULL; - const char *daemons[] = { - "stonith-ng", - T_ATTRD, - CRM_SYSTEM_MCP, + const enum pcmk_ipc_server servers[] = { + pcmk_ipc_controld, + pcmk_ipc_attrd, }; // Check whether node was specified by name or numeric ID @@ -485,25 +733,28 @@ remove_node(const char *target_uname) node_name = target_uname; } - rc = controller_remove_node(node_name, nodeid); + for (int i = 0; i < PCMK__NELEM(servers); ++i) { + rc = purge_node_from(servers[i], node_name, nodeid); + if (rc != pcmk_rc_ok) { + exit_code = pcmk_rc2exitc(rc); + return; + } + } + + // The fencer hasn't been converted to pcmk_ipc_api_t yet + rc = purge_node_from_fencer(node_name, nodeid); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); return; } - for (d = 0; d < PCMK__NELEM(daemons); d++) { - if (tools_remove_node_cache(node_name, nodeid, daemons[d])) { - crm_err("Failed to connect to %s to remove node '%s'", - daemons[d], target_uname); - exit_code = CRM_EX_ERROR; - return; - } - } - exit_code = CRM_EX_OK; + // Lastly, purge the node from the CIB itself + rc = purge_node_from_cib(node_name, nodeid); + exit_code = pcmk_rc2exitc(rc); } static GOptionContext * -build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) { +build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { @@ -514,7 +765,7 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) { { NULL } }; - context = pcmk__build_arg_context(args, NULL, &group, NULL); + context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); /* Add the -q option, which cannot be part of the globally supported options * because some tools use that flag for something else. @@ -531,13 +782,14 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) { int main(int argc, char **argv) { - GError *error = NULL; + int rc = pcmk_rc_ok; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "NR"); - GOptionContext *context = build_arg_context(args, output_group); + GOptionContext *context = build_arg_context(args, &output_group); + pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; @@ -545,49 +797,72 @@ main(int argc, char **argv) pcmk__cli_init_logging("crm_node", args->verbosity); + rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); + if (rc != pcmk_rc_ok) { + exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Error creating output format %s: %s", args->output_ty, + pcmk_rc_str(rc)); + goto done; + } + + if (!pcmk__force_args(context, &error, "%s --xml-simple-list", g_get_prgname())) { + exit_code = CRM_EX_SOFTWARE; + goto done; + } + if (args->version) { - g_strfreev(processed_args); - pcmk__free_arg_context(context); - /* FIXME: When crm_node is converted to use formatted output, this can go. 
*/ - pcmk__cli_help('v'); + out->version(out, false); + goto done; } if (options.command == 0) { char *help = g_option_context_get_help(context, TRUE, NULL); - fprintf(stderr, "%s", help); + out->err(out, "%s", help); g_free(help); exit_code = CRM_EX_USAGE; goto done; } if (options.dangerous_cmd && options.force_flag == FALSE) { - fprintf(stderr, "The supplied command is considered dangerous." - " To prevent accidental destruction of the cluster," - " the --force flag is required in order to proceed.\n"); exit_code = CRM_EX_USAGE; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "The supplied command is considered dangerous." + " To prevent accidental destruction of the cluster," + " the --force flag is required in order to proceed."); goto done; } + pcmk__register_lib_messages(out); + pcmk__register_messages(out, fmt_functions); + switch (options.command) { - case 'n': - print_node_name(); + case 'i': + print_node_id(); break; - case 'R': - remove_node(options.target_uname); + + case 'n': + print_node_name(0); break; - case 'i': + case 'q': + print_quorum(); + break; + case 'N': - /* FIXME: Use pcmk__query_node_name() after conversion to formatted - * output - */ - run_controller_mainloop(options.nodeid, false); + print_node_name(options.nodeid); break; + + case 'R': + remove_node(options.target_uname); + break; + case 'l': case 'p': - run_controller_mainloop(0, true); + run_controller_mainloop(); break; + default: break; } @@ -596,6 +871,12 @@ done: g_strfreev(processed_args); pcmk__free_arg_context(context); - pcmk__output_and_clear_error(&error, NULL); + pcmk__output_and_clear_error(&error, out); + + if (out != NULL) { + out->finish(out, exit_code, true, NULL); + pcmk__output_free(out); + } + pcmk__unregister_formats(); return crm_exit(exit_code); } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index f351c26..7c4a0a1 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -76,7 +76,7 @@ struct { gboolean require_cib; // Whether command requires CIB IPC int cib_options; // Options to use with CIB IPC calls gboolean require_crmd; // Whether command requires controller IPC - gboolean require_dataset; // Whether command requires populated data set + gboolean require_scheduler; // Whether command requires scheduler data gboolean require_resource; // Whether command requires resource specified gboolean require_node; // Whether command requires node specified int find_flags; // Flags to use when searching for resource @@ -117,7 +117,7 @@ struct { .check_level = -1, .cib_options = cib_sync_call, .require_cib = TRUE, - .require_dataset = TRUE, + .require_scheduler = TRUE, .require_resource = TRUE, }; @@ -183,7 +183,7 @@ static GError *error = NULL; static GMainLoop *mainloop = NULL; static cib_t *cib_conn = NULL; static pcmk_ipc_api_t *controld_api = NULL; -static pe_working_set_t *data_set = NULL; +static pcmk_scheduler_t *scheduler = NULL; #define MESSAGE_TIMEOUT_S 60 @@ -227,8 +227,8 @@ bye(crm_exit_t ec) mainloop = NULL; } - pe_free_working_set(data_set); - data_set = NULL; + pe_free_working_set(scheduler); + scheduler = NULL; crm_exit(ec); return ec; } @@ -650,7 +650,7 @@ reset_options(void) { options.require_node = FALSE; options.require_cib = TRUE; - options.require_dataset = TRUE; + options.require_scheduler = TRUE; options.require_resource = TRUE; options.find_flags = 0; @@ -702,15 +702,15 @@ cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, if (getenv("CIB_file") == NULL) { options.require_crmd = TRUE; } - options.find_flags = 
pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; return TRUE; } gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_delete); - options.require_dataset = FALSE; - options.find_flags = pe_find_renamed|pe_find_any; + options.require_scheduler = FALSE; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; return TRUE; } @@ -725,7 +725,7 @@ static void get_agent_spec(const gchar *optarg) { options.require_cib = FALSE; - options.require_dataset = FALSE; + options.require_scheduler = FALSE; options.require_resource = FALSE; pcmk__str_update(&options.agent_spec, optarg); } @@ -754,7 +754,7 @@ list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, { SET_COMMAND(cmd_list_standards); options.require_cib = FALSE; - options.require_dataset = FALSE; + options.require_scheduler = FALSE; options.require_resource = FALSE; return TRUE; } @@ -806,30 +806,36 @@ gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { SET_COMMAND(cmd_clear); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { SET_COMMAND(cmd_ban); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { SET_COMMAND(cmd_move); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { SET_COMMAND(cmd_query_xml); - options.find_flags = pe_find_renamed|pe_find_any; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { SET_COMMAND(cmd_query_raw_xml); - options.find_flags = pe_find_renamed|pe_find_any; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { SET_COMMAND(cmd_locate); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; } else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) { SET_COMMAND(cmd_colocations); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { SET_COMMAND(cmd_colocations); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename; options.recursive = TRUE; } @@ -845,7 +851,7 @@ get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, } pcmk__str_update(&options.prop_name, optarg); - options.find_flags = pe_find_renamed|pe_find_any; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; return TRUE; } @@ -876,16 +882,16 @@ set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data } pcmk__str_update(&options.prop_name, optarg); - options.find_flags = pe_find_renamed|pe_find_any; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; return TRUE; } gboolean set_prop_cb(const gchar 
*option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_set_property); - options.require_dataset = FALSE; + options.require_scheduler = FALSE; pcmk__str_update(&options.prop_name, optarg); - options.find_flags = pe_find_renamed|pe_find_any; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; return TRUE; } @@ -904,7 +910,7 @@ validate_or_force_cb(const gchar *option_name, const gchar *optarg, g_free(options.operation); } options.operation = g_strdup(option_name + 2); // skip "--" - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; if (options.override_params == NULL) { options.override_params = pcmk__strkey_table(free, free); } @@ -925,7 +931,7 @@ restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_restart); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; return TRUE; } @@ -934,12 +940,12 @@ digests_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_digests); - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; if (options.override_params == NULL) { options.override_params = pcmk__strkey_table(free, free); } options.require_node = TRUE; - options.require_dataset = TRUE; + options.require_scheduler = TRUE; return TRUE; } @@ -947,7 +953,7 @@ gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_wait); options.require_resource = FALSE; - options.require_dataset = FALSE; + options.require_scheduler = FALSE; return TRUE; } @@ -955,15 +961,16 @@ gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_why); options.require_resource = FALSE; - options.find_flags = pe_find_renamed|pe_find_anon; + options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; return TRUE; } static int -ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime) +ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc, + const char *move_lifetime) { int rc = pcmk_rc_ok; - pe_node_t *current = NULL; + pcmk_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); @@ -971,27 +978,29 @@ ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime) current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { - rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, - cib_conn, options.cib_options, options.promoted_role_only); + rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, + cib_conn, options.cib_options, options.promoted_role_only, + PCMK__ROLE_PROMOTED); - } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { + } else if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) { int count = 0; GList *iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { - pe_resource_t *child = (pe_resource_t *)iter->data; + pcmk_resource_t *child = (pcmk_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); - if (child_role == RSC_ROLE_PROMOTED) { + if (child_role == pcmk_role_promoted) { count++; current = pe__current_node(child); } } if(count == 1 && current) { - rc = cli_resource_ban(out, options.rsc_id, current->details->uname, 
move_lifetime, NULL, - cib_conn, options.cib_options, options.promoted_role_only); + rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, + cib_conn, options.cib_options, options.promoted_role_only, + PCMK__ROLE_PROMOTED); } else { rc = EINVAL; @@ -1017,7 +1026,7 @@ ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime) } static void -cleanup(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) +cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { int rc = pcmk_rc_ok; @@ -1027,8 +1036,9 @@ cleanup(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); - rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation, - options.interval_spec, TRUE, data_set, options.force); + rc = cli_resource_delete(controld_api, options.host_uname, rsc, + options.operation, options.interval_spec, TRUE, + scheduler, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped @@ -1047,20 +1057,21 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) GList *after = NULL; GList *remaining = NULL; GList *ele = NULL; - pe_node_t *dest = NULL; + pcmk_node_t *dest = NULL; int rc = pcmk_rc_ok; if (!out->is_quiet(out)) { - before = build_constraint_list(data_set->input); + before = build_constraint_list(scheduler->input); } if (options.clear_expired) { - rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options, - options.rsc_id, options.host_uname, + rc = cli_resource_clear_all_expired(scheduler->input, cib_conn, + options.cib_options, options.rsc_id, + options.host_uname, options.promoted_role_only); } else if (options.host_uname) { - dest = pe_find_node(data_set->nodes, options.host_uname); + dest = pe_find_node(scheduler->nodes, options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; if (!out->is_quiet(out)) { @@ -1072,7 +1083,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) cib_conn, options.cib_options, TRUE, options.force); } else { - rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes, + rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes, cib_conn, options.cib_options, TRUE, options.force); } @@ -1082,17 +1093,17 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, - _("Could not get modified CIB: %s\n"), pcmk_strerror(rc)); + _("Could not get modified CIB: %s\n"), pcmk_rc_str(rc)); g_list_free(before); free_xml(*cib_xml_copy); *cib_xml_copy = NULL; return rc; } - data_set->input = *cib_xml_copy; - cluster_status(data_set); + scheduler->input = *cib_xml_copy; + cluster_status(scheduler); - after = build_constraint_list(data_set->input); + after = build_constraint_list(scheduler->input); remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { @@ -1131,119 +1142,7 @@ delete(void) } static int -list_agents(pcmk__output_t *out, const char *agent_spec) -{ - int rc = pcmk_rc_ok; - char *provider = strchr(agent_spec, ':'); - lrmd_t *lrmd_conn = NULL; - lrmd_list_t *list = NULL; - - rc = lrmd__new(&lrmd_conn, NULL, NULL, 0); - if (rc != pcmk_rc_ok) { - goto error; - } - - if (provider) { - *provider++ = 0; - } - - rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider); 
- - if (rc > 0) { - rc = out->message(out, "agents-list", list, agent_spec, provider); - } else { - rc = pcmk_rc_error; - } - -error: - if (rc != pcmk_rc_ok) { - if (provider == NULL) { - g_set_error(&error, PCMK__RC_ERROR, rc, - _("No agents found for standard '%s'"), agent_spec); - } else { - g_set_error(&error, PCMK__RC_ERROR, rc, - _("No agents found for standard '%s' and provider '%s'"), - agent_spec, provider); - } - } - - lrmd_api_delete(lrmd_conn); - return rc; -} - -static int -list_providers(pcmk__output_t *out, const char *agent_spec) -{ - int rc; - const char *text = NULL; - lrmd_t *lrmd_conn = NULL; - lrmd_list_t *list = NULL; - - rc = lrmd__new(&lrmd_conn, NULL, NULL, 0); - if (rc != pcmk_rc_ok) { - goto error; - } - - switch (options.rsc_cmd) { - case cmd_list_alternatives: - rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); - - if (rc > 0) { - rc = out->message(out, "alternatives-list", list, agent_spec); - } else { - rc = pcmk_rc_error; - } - - text = "OCF providers"; - break; - case cmd_list_standards: - rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); - - if (rc > 0) { - rc = out->message(out, "standards-list", list); - } else { - rc = pcmk_rc_error; - } - - text = "standards"; - break; - case cmd_list_providers: - rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); - - if (rc > 0) { - rc = out->message(out, "providers-list", list, agent_spec); - } else { - rc = pcmk_rc_error; - } - - text = "OCF providers"; - break; - default: - g_set_error(&error, PCMK__RC_ERROR, pcmk_rc_error, "Bug"); - lrmd_api_delete(lrmd_conn); - return pcmk_rc_error; - } - -error: - if (rc != pcmk_rc_ok) { - if (agent_spec != NULL) { - rc = ENXIO; - g_set_error(&error, PCMK__RC_ERROR, rc, - _("No %s found for %s"), text, agent_spec); - - } else { - rc = ENXIO; - g_set_error(&error, PCMK__RC_ERROR, rc, - _("No %s found"), text); - } - } - - lrmd_api_delete(lrmd_conn); - return rc; -} - -static int -populate_working_set(xmlNodePtr *cib_xml_copy) +initialize_scheduler_data(xmlNodePtr *cib_xml_copy) { int rc = pcmk_rc_ok; @@ -1258,14 +1157,15 @@ populate_working_set(xmlNodePtr *cib_xml_copy) } if (rc == pcmk_rc_ok) { - data_set = pe_new_working_set(); - if (data_set == NULL) { + scheduler = pe_new_working_set(); + if (scheduler == NULL) { rc = ENOMEM; } else { - pe__set_working_set_flags(data_set, - pe_flag_no_counts|pe_flag_no_compat); - data_set->priv = out; - rc = update_working_set_xml(data_set, cib_xml_copy); + pe__set_working_set_flags(scheduler, + pcmk_sched_no_counts + |pcmk_sched_no_compat); + scheduler->priv = out; + rc = update_scheduler_input(scheduler, cib_xml_copy); } } @@ -1275,7 +1175,7 @@ populate_working_set(xmlNodePtr *cib_xml_copy) return rc; } - cluster_status(data_set); + cluster_status(scheduler); return pcmk_rc_ok; } @@ -1287,7 +1187,7 @@ refresh(pcmk__output_t *out) int attr_options = pcmk__node_attr_none; if (options.host_uname) { - pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname); + pcmk_node_t *node = pe_find_node(scheduler->nodes, options.host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); @@ -1324,7 +1224,7 @@ refresh(pcmk__output_t *out) } static void -refresh_resource(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) +refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { int rc = pcmk_rc_ok; @@ -1335,7 +1235,7 @@ refresh_resource(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) crm_debug("Re-checking the 
state of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0, - FALSE, data_set, options.force); + FALSE, scheduler, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped @@ -1474,7 +1374,7 @@ validate_cmdline_config(void) options.cmdline_params = pcmk__strkey_table(free, free); } options.require_resource = FALSE; - options.require_dataset = FALSE; + options.require_scheduler = FALSE; options.require_cib = FALSE; } @@ -1547,8 +1447,8 @@ int main(int argc, char **argv) { xmlNode *cib_xml_copy = NULL; - pe_resource_t *rsc = NULL; - pe_node_t *node = NULL; + pcmk_resource_t *rsc = NULL; + pcmk_node_t *node = NULL; int rc = pcmk_rc_ok; GOptionGroup *output_group = NULL; @@ -1730,7 +1630,7 @@ main(int argc, char **argv) */ if (options.find_flags && options.rsc_id) { - options.require_dataset = TRUE; + options.require_scheduler = TRUE; } // Establish a connection to the CIB if needed @@ -1752,9 +1652,9 @@ main(int argc, char **argv) } } - /* Populate working set from XML file if specified or CIB query otherwise */ - if (options.require_dataset) { - rc = populate_working_set(&cib_xml_copy); + // Populate scheduler data from XML file if specified or CIB query otherwise + if (options.require_scheduler) { + rc = initialize_scheduler_data(&cib_xml_copy); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); goto done; @@ -1763,7 +1663,7 @@ main(int argc, char **argv) // If command requires that resource exist if specified, find it if (options.find_flags && options.rsc_id) { - rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id, + rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id, options.find_flags); if (rsc == NULL) { exit_code = CRM_EX_NOSUCH; @@ -1786,8 +1686,8 @@ main(int argc, char **argv) } // If user supplied a node name, check whether it exists - if ((options.host_uname != NULL) && (data_set != NULL)) { - node = pe_find_node(data_set->nodes, options.host_uname); + if ((options.host_uname != NULL) && (scheduler != NULL)) { + node = pe_find_node(scheduler->nodes, options.host_uname); if (node == NULL) { exit_code = CRM_EX_NOSUCH; @@ -1808,11 +1708,12 @@ main(int argc, char **argv) } pcmk_register_ipc_callback(controld_api, controller_event_callback, NULL); - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); + rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - _("Error connecting to the controller: %s"), pcmk_rc_str(rc)); + _("Error connecting to %s: %s"), + pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc)); goto done; } } @@ -1825,7 +1726,7 @@ main(int argc, char **argv) case cmd_list_resources: { GList *all = NULL; all = g_list_prepend(all, (gpointer) "*"); - rc = out->message(out, "resource-list", data_set, + rc = out->message(out, "resource-list", scheduler, pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending, true, all, all, false); g_list_free(all); @@ -1837,7 +1738,7 @@ main(int argc, char **argv) } case cmd_list_instances: - rc = out->message(out, "resource-names-list", data_set->resources); + rc = out->message(out, "resource-names-list", scheduler->resources); if (rc != pcmk_rc_ok) { rc = ENXIO; @@ -1845,14 +1746,20 @@ main(int argc, char **argv) break; - case cmd_list_standards: - case cmd_list_providers: case 
cmd_list_alternatives: - rc = list_providers(out, options.agent_spec); + rc = pcmk__list_alternatives(out, options.agent_spec); break; case cmd_list_agents: - rc = list_agents(out, options.agent_spec); + rc = pcmk__list_agents(out, options.agent_spec); + break; + + case cmd_list_standards: + rc = pcmk__list_standards(out); + break; + + case cmd_list_providers: + rc = pcmk__list_providers(out, options.agent_spec); break; case cmd_metadata: @@ -1860,10 +1767,10 @@ main(int argc, char **argv) break; case cmd_restart: - /* We don't pass data_set because rsc needs to stay valid for the + /* We don't pass scheduler because rsc needs to stay valid for the * entire lifetime of cli_resource_restart(), but it will reset and - * update the working set multiple times, so it needs to use its own - * copy. + * update the scheduler data multiple times, so it needs to use its + * own copy. */ rc = cli_resource_restart(out, rsc, node, options.move_lifetime, options.timeout_ms, cib_conn, @@ -1885,13 +1792,13 @@ main(int argc, char **argv) } else { exit_code = cli_resource_execute(rsc, options.rsc_id, options.operation, options.override_params, - options.timeout_ms, cib_conn, data_set, + options.timeout_ms, cib_conn, scheduler, args->verbosity, options.force, options.check_level); } goto done; case cmd_digests: - node = pe_find_node(data_set->nodes, options.host_uname); + node = pe_find_node(scheduler->nodes, options.host_uname); if (node == NULL) { rc = pcmk_rc_node_unknown; } else { @@ -1901,19 +1808,20 @@ main(int argc, char **argv) break; case cmd_colocations: - rc = out->message(out, "locations-and-colocations", rsc, data_set, + rc = out->message(out, "locations-and-colocations", rsc, options.recursive, (bool) options.force); break; case cmd_cts: rc = pcmk_rc_ok; - g_list_foreach(data_set->resources, (GFunc) cli_resource_print_cts, out); - cli_resource_print_cts_constraints(data_set); + g_list_foreach(scheduler->resources, (GFunc) cli_resource_print_cts, + out); + cli_resource_print_cts_constraints(scheduler); break; case cmd_fail: rc = cli_resource_fail(controld_api, options.host_uname, - options.rsc_id, data_set); + options.rsc_id, scheduler); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } @@ -1922,28 +1830,28 @@ main(int argc, char **argv) case cmd_list_active_ops: rc = cli_resource_print_operations(options.rsc_id, options.host_uname, TRUE, - data_set); + scheduler); break; case cmd_list_all_ops: rc = cli_resource_print_operations(options.rsc_id, options.host_uname, FALSE, - data_set); + scheduler); break; case cmd_locate: { - GList *nodes = cli_resource_search(rsc, options.rsc_id, data_set); + GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler); rc = out->message(out, "resource-search-list", nodes, options.rsc_id); g_list_free_full(nodes, free); break; } case cmd_query_xml: - rc = cli_resource_print(rsc, data_set, true); + rc = cli_resource_print(rsc, scheduler, true); break; case cmd_query_raw_xml: - rc = cli_resource_print(rsc, data_set, false); + rc = cli_resource_print(rsc, scheduler, false); break; case cmd_why: @@ -1951,7 +1859,7 @@ main(int argc, char **argv) rc = pcmk_rc_node_unknown; } else { rc = out->message(out, "resource-reasons-list", - data_set->resources, rsc, node); + scheduler->resources, rsc, node); } break; @@ -1965,7 +1873,7 @@ main(int argc, char **argv) } else { rc = cli_resource_move(rsc, options.rsc_id, options.host_uname, options.move_lifetime, cib_conn, - options.cib_options, data_set, + options.cib_options, scheduler, options.promoted_role_only, 
options.force); } @@ -1984,9 +1892,10 @@ main(int argc, char **argv) rc = pcmk_rc_node_unknown; } else { rc = cli_resource_ban(out, options.rsc_id, node->details->uname, - options.move_lifetime, NULL, cib_conn, + options.move_lifetime, cib_conn, options.cib_options, - options.promoted_role_only); + options.promoted_role_only, + PCMK__ROLE_PROMOTED); } if (rc == EINVAL) { @@ -2011,7 +1920,7 @@ main(int argc, char **argv) case cmd_get_param: { unsigned int count = 0; GHashTable *params = NULL; - pe_node_t *current = rsc->fns->active_node(rsc, &count, NULL); + pcmk_node_t *current = rsc->fns->active_node(rsc, &count, NULL); bool free_params = true; const char* value = NULL; @@ -2025,14 +1934,14 @@ main(int argc, char **argv) crm_debug("Looking up %s in %s", options.prop_name, rsc->id); if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_none)) { - params = pe_rsc_params(rsc, current, data_set); + params = pe_rsc_params(rsc, current, scheduler); free_params = false; value = g_hash_table_lookup(params, options.prop_name); } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_none)) { params = pcmk__strkey_table(free, free); - get_meta_attributes(params, rsc, current, data_set); + get_meta_attributes(params, rsc, current, scheduler); value = g_hash_table_lookup(params, options.prop_name); @@ -2044,7 +1953,7 @@ main(int argc, char **argv) } else { params = pcmk__strkey_table(free, free); pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, - NULL, FALSE, data_set); + NULL, FALSE, scheduler); value = g_hash_table_lookup(params, options.prop_name); } @@ -2092,7 +2001,7 @@ main(int argc, char **argv) if (rsc == NULL) { rc = cli_cleanup_all(controld_api, options.host_uname, options.operation, options.interval_spec, - data_set); + scheduler); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index dcd6c3d..dc86572 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -19,6 +19,7 @@ #include <crm/common/xml.h> #include <crm/common/mainloop.h> #include <crm/common/output_internal.h> +#include <crm/common/scheduler_internal.h> #include <crm/cib.h> #include <crm/common/attrd_internal.h> @@ -43,54 +44,56 @@ enum resource_check_flags { }; typedef struct resource_checks_s { - pe_resource_t *rsc; // Resource being checked + pcmk_resource_t *rsc; // Resource being checked uint32_t flags; // Group of enum resource_check_flags const char *lock_node; // Node that resource is shutdown-locked to, if any } resource_checks_t; -resource_checks_t *cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed); +resource_checks_t *cli_check_resource(pcmk_resource_t *rsc, char *role_s, + char *managed); /* ban */ int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t * cib_conn, int cib_options, - gboolean promoted_role_only); + gboolean promoted_role_only, const char *promoted_role); int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, - const char *move_lifetime, GList *allnodes, cib_t * cib_conn, - int cib_options, gboolean promoted_role_only); + const char *move_lifetime, cib_t *cib_conn, int cib_options, + gboolean promoted_role_only, const char *promoted_role); int cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t * cib_conn, int cib_options, bool clear_ban_constraints, gboolean force); int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int 
cib_options, const char *rsc, const char *node, gboolean promoted_role_only); /* print */ -void cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out); -void cli_resource_print_cts_constraints(pe_working_set_t * data_set); +void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out); +void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler); -int cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded); +int cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, + bool expanded); int cli_resource_print_operations(const char *rsc_id, const char *host_uname, - bool active, pe_working_set_t * data_set); + bool active, pcmk_scheduler_t *scheduler); /* runtime */ -int cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, - pe_node_t *node); +int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, + pcmk_node_t *node); int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, - const char *rsc_id, pe_working_set_t *data_set); -GList *cli_resource_search(pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set); + const char *rsc_id, pcmk_scheduler_t *scheduler); +GList *cli_resource_search(pcmk_resource_t *rsc, const char *requested_name, + pcmk_scheduler_t *scheduler); int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - const pe_resource_t *rsc, const char *operation, + const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, - pe_working_set_t *data_set, gboolean force); + pcmk_scheduler_t *scheduler, gboolean force); int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, - pe_working_set_t *data_set); -int cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, - const pe_node_t *node, const char *move_lifetime, + pcmk_scheduler_t *scheduler); +int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, + const pcmk_node_t *node, const char *move_lifetime, int timeout_ms, cib_t *cib, int cib_options, gboolean promoted_role_only, gboolean force); -int cli_resource_move(const pe_resource_t *rsc, const char *rsc_id, +int cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id, const char *host_name, const char *move_lifetime, - cib_t *cib, int cib_options, pe_working_set_t *data_set, + cib_t *cib, int cib_options, pcmk_scheduler_t *scheduler, gboolean promoted_role_only, gboolean force); crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, const char *rsc_class, const char *rsc_prov, @@ -98,24 +101,28 @@ crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc GHashTable *params, GHashTable *override_hash, int timeout_ms, int resource_verbose, gboolean force, int check_level); -crm_exit_t cli_resource_execute(pe_resource_t *rsc, const char *requested_name, +crm_exit_t cli_resource_execute(pcmk_resource_t *rsc, + const char *requested_name, const char *rsc_action, GHashTable *override_hash, - int timeout_ms, cib_t *cib, pe_working_set_t *data_set, + int timeout_ms, cib_t *cib, + pcmk_scheduler_t *scheduler, int resource_verbose, gboolean force, int check_level); -int cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, +int cli_resource_update_attribute(pcmk_resource_t *rsc, + const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean 
recursive, cib_t *cib, int cib_options, gboolean force); -int cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, +int cli_resource_delete_attribute(pcmk_resource_t *rsc, + const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, int cib_options, gboolean force); -int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml); +int update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml); int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib); -bool resource_is_running_on(pe_resource_t *rsc, const char *host); +bool resource_is_running_on(pcmk_resource_t *rsc, const char *host); void crm_resource_register_messages(pcmk__output_t *out); diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c index b1edac8..3b0e4a1 100644 --- a/tools/crm_resource_ban.c +++ b/tools/crm_resource_ban.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2021 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -56,42 +56,17 @@ parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime) return later_s; } -static const char * -promoted_role_name(void) -{ - /* This is a judgment call for what string to use. @TODO Ideally we'd - * use the legacy string if the DC only supports that, and the new one - * otherwise. Basing it on --enable-compat-2.0 is a decent guess. - */ -#ifdef PCMK__COMPAT_2_0 - return RSC_ROLE_PROMOTED_LEGACY_S; -#else - return RSC_ROLE_PROMOTED_S; -#endif -} - // \return Standard Pacemaker return code int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, - const char *move_lifetime, GList *allnodes, cib_t * cib_conn, - int cib_options, gboolean promoted_role_only) + const char *move_lifetime, cib_t * cib_conn, int cib_options, + gboolean promoted_role_only, const char *promoted_role) { char *later_s = NULL; int rc = pcmk_rc_ok; xmlNode *fragment = NULL; xmlNode *location = NULL; - if(host == NULL) { - GList *n = allnodes; - for(; n && rc == pcmk_rc_ok; n = n->next) { - pe_node_t *target = n->data; - - rc = cli_resource_ban(out, rsc_id, target->details->uname, move_lifetime, - NULL, cib_conn, cib_options, promoted_role_only); - } - return rc; - } - later_s = parse_cli_lifetime(out, move_lifetime); if(move_lifetime && later_s == NULL) { return EINVAL; @@ -114,9 +89,9 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(promoted_role_only) { - crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role_name()); + crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role); } else { - crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); + crm_xml_add(location, XML_RULE_ATTR_ROLE, PCMK__ROLE_STARTED); } if (later_s == NULL) { @@ -151,14 +126,24 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, free_xml(fragment); free(later_s); + + if (rc != pcmk_rc_ok && promoted_role_only && strcmp(promoted_role, PCMK__ROLE_PROMOTED) == 0) { + int banrc = cli_resource_ban(out, rsc_id, host, move_lifetime, + cib_conn, cib_options, promoted_role_only, + PCMK__ROLE_PROMOTED_LEGACY); + if (banrc == pcmk_rc_ok) { + rc = banrc; + } + } + return rc; } // \return Standard Pacemaker return code int cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host, - const char *move_lifetime, cib_t * cib_conn, int cib_options, - 
gboolean promoted_role_only) + const char *move_lifetime, cib_t *cib_conn, int cib_options, + gboolean promoted_role_only, const char *promoted_role) { char *later_s = parse_cli_lifetime(out, move_lifetime); int rc = pcmk_rc_ok; @@ -181,9 +166,9 @@ cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host, crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(promoted_role_only) { - crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role_name()); + crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role); } else { - crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); + crm_xml_add(location, XML_RULE_ATTR_ROLE, PCMK__ROLE_STARTED); } if (later_s == NULL) { @@ -218,6 +203,16 @@ cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host, free_xml(fragment); free(later_s); + + if (rc != pcmk_rc_ok && promoted_role_only && strcmp(promoted_role, PCMK__ROLE_PROMOTED) == 0) { + int preferrc = cli_resource_prefer(out, rsc_id, host, move_lifetime, + cib_conn, cib_options, promoted_role_only, + PCMK__ROLE_PROMOTED_LEGACY); + if (preferrc == pcmk_rc_ok) { + rc = preferrc; + } + } + return rc; } @@ -335,7 +330,7 @@ cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t * On the first error, abort. */ for(; n; n = n->next) { - pe_node_t *target = n->data; + pcmk_node_t *target = n->data; rc = cli_resource_clear(rsc_id, target->details->uname, NULL, cib_conn, cib_options, clear_ban_constraints, @@ -358,6 +353,9 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node, const char *cons_rsc = crm_element_value(constraint_node, XML_LOC_ATTR_SOURCE); GString *rsc_role_substr = NULL; + const char *promoted_role_rule = "@" XML_RULE_ATTR_ROLE "='" PCMK__ROLE_PROMOTED + "' or @" XML_RULE_ATTR_ROLE "='" + PCMK__ROLE_PROMOTED_LEGACY "'"; CRM_ASSERT(buf != NULL); g_string_truncate(buf, 0); @@ -384,8 +382,7 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node, rsc_role_substr = g_string_sized_new(64); pcmk__g_strcat(rsc_role_substr, "@" XML_LOC_ATTR_SOURCE "='", rsc, "' " - "and @" XML_RULE_ATTR_ROLE "='", - promoted_role_name(), "'", NULL); + "and (" , promoted_role_rule, ")", NULL); } else if (rsc != NULL) { rsc_role_substr = g_string_sized_new(64); @@ -394,9 +391,7 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node, } else if (promoted_role_only) { rsc_role_substr = g_string_sized_new(64); - pcmk__g_strcat(rsc_role_substr, - "@" XML_RULE_ATTR_ROLE "='", promoted_role_name(), - "'", NULL); + g_string_append(rsc_role_substr, promoted_role_rule); } if (rsc_role_substr != NULL) { diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c index c1be53c..bdf3ad9 100644 --- a/tools/crm_resource_print.c +++ b/tools/crm_resource_print.c @@ -20,8 +20,8 @@ static int print_constraint(xmlNode *xml_obj, void *userdata) { - pe_working_set_t *data_set = (pe_working_set_t *) userdata; - pcmk__output_t *out = data_set->priv; + pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) userdata; + pcmk__output_t *out = scheduler->priv; xmlNode *lifetime = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); @@ -31,16 +31,16 @@ print_constraint(xmlNode *xml_obj, void *userdata) // @COMPAT lifetime is deprecated lifetime = first_named_child(xml_obj, "lifetime"); - if (pe_evaluate_rules(lifetime, NULL, data_set->now, NULL) == FALSE) { + if (pe_evaluate_rules(lifetime, NULL, scheduler->now, NULL) == FALSE) { return pcmk_rc_ok; } - if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, 
crm_element_name(xml_obj), pcmk__str_casei)) { + if (!pcmk__xe_is(xml_obj, XML_CONS_TAG_RSC_DEPEND)) { return pcmk_rc_ok; } out->info(out, "Constraint %s %s %s %s %s %s %s", - crm_element_name(xml_obj), + xml_obj->name, cons_string(crm_element_value(xml_obj, XML_ATTR_ID)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)), @@ -52,21 +52,22 @@ print_constraint(xmlNode *xml_obj, void *userdata) } void -cli_resource_print_cts_constraints(pe_working_set_t * data_set) +cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler) { - pcmk__xe_foreach_child(pcmk_find_cib_element(data_set->input, XML_CIB_TAG_CONSTRAINTS), - NULL, print_constraint, data_set); + pcmk__xe_foreach_child(pcmk_find_cib_element(scheduler->input, + XML_CIB_TAG_CONSTRAINTS), + NULL, print_constraint, scheduler); } void -cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out) +cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out) { const char *host = NULL; bool needs_quorum = TRUE; const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); - pe_node_t *node = pe__current_node(rsc); + pcmk_node_t *node = pe__current_node(rsc); if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { needs_quorum = FALSE; @@ -79,7 +80,7 @@ cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out) } out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld %#.16llx", - crm_element_name(rsc->xml), rsc->id, + rsc->xml->name, rsc->id, rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA", rprov ? rprov : "NA", rclass, rtype, host ? 
host : "NA", needs_quorum, rsc->flags, rsc->flags); @@ -90,11 +91,11 @@ cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out) // \return Standard Pacemaker return code int cli_resource_print_operations(const char *rsc_id, const char *host_uname, - bool active, pe_working_set_t * data_set) + bool active, pcmk_scheduler_t *scheduler) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_no_output; - GList *ops = find_operations(rsc_id, host_uname, active, data_set); + GList *ops = find_operations(rsc_id, host_uname, active, scheduler); if (!ops) { return rc; @@ -105,7 +106,7 @@ cli_resource_print_operations(const char *rsc_id, const char *host_uname, for (GList *lpc = ops; lpc != NULL; lpc = lpc->next) { xmlNode *xml_op = (xmlNode *) lpc->data; - out->message(out, "node-and-op", data_set, xml_op); + out->message(out, "node-and-op", scheduler, xml_op); } out->end_list(out); @@ -114,9 +115,10 @@ cli_resource_print_operations(const char *rsc_id, const char *host_uname, // \return Standard Pacemaker return code int -cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded) +cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, + bool expanded) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; uint32_t show_opts = pcmk_show_pending; GList *all = NULL; @@ -131,10 +133,11 @@ cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "const char *", "const char *") +PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", + "const char *") static int attribute_list_default(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); @@ -224,10 +227,11 @@ agent_status_xml(pcmk__output_t *out, va_list args) { return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "const char *", "const char *") +PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", + "const char *") static int attribute_list_text(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); @@ -276,10 +280,10 @@ override_xml(pcmk__output_t *out, va_list args) { return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "const char *") +PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *") static int property_list_default(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = crm_element_value(rsc->xml, attr); @@ -293,10 +297,10 @@ property_list_default(pcmk__output_t *out, va_list args) { return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "const char *") +PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *") static int property_list_text(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, const char *); const char *value = crm_element_value(rsc->xml, 
attr); @@ -346,7 +350,8 @@ resource_agent_action_default(pcmk__output_t *out, va_list args) { type, rc, exit_reason); /* hide output for validate-all if not in verbose */ - if (verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) { + if ((verbose == 0) + && pcmk__str_eq(action, PCMK_ACTION_VALIDATE_ALL, pcmk__str_casei)) { return pcmk_rc_ok; } @@ -441,7 +446,7 @@ static int resource_check_list_default(pcmk__output_t *out, va_list args) { resource_checks_t *checks = va_arg(args, resource_checks_t *); - const pe_resource_t *parent = pe__const_top_resource(checks->rsc, false); + const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false); if (checks->flags == 0) { return pcmk_rc_no_output; @@ -487,7 +492,7 @@ static int resource_check_list_xml(pcmk__output_t *out, va_list args) { resource_checks_t *checks = va_arg(args, resource_checks_t *); - const pe_resource_t *parent = pe__const_top_resource(checks->rsc, false); + const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false); xmlNodePtr node = pcmk__output_create_xml_node(out, "check", "id", parent->id, @@ -547,9 +552,9 @@ resource_search_list_default(pcmk__output_t *out, va_list args) if (ni->promoted) { #ifdef PCMK__COMPAT_2_0 - role_text = " " RSC_ROLE_PROMOTED_LEGACY_S; + role_text = " " PCMK__ROLE_PROMOTED_LEGACY; #else - role_text = " " RSC_ROLE_PROMOTED_S; + role_text = " " PCMK__ROLE_PROMOTED; #endif } out->list_item(out, "node", "resource %s is running on: %s%s", @@ -587,14 +592,14 @@ resource_search_list_xml(pcmk__output_t *out, va_list args) return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pe_resource_t *", - "pe_node_t *") +PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *", + "pcmk_node_t *") static int resource_reasons_list_default(pcmk__output_t *out, va_list args) { GList *resources = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); + pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *host_uname = (node == NULL)? 
NULL : node->details->uname; @@ -605,7 +610,7 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args) GList *hosts = NULL; for (lpc = resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; rsc->fns->location(rsc, &hosts, TRUE); if (hosts == NULL) { @@ -638,14 +643,14 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args) GList *lpc = NULL; for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; out->list_item(out, "reason", "Resource %s is running on host %s", rsc->id, host_uname); cli_resource_check(out, rsc, node); } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; out->list_item(out, "reason", "Resource %s is assigned to host %s but not running", rsc->id, host_uname); cli_resource_check(out, rsc, node); @@ -669,14 +674,14 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args) return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pe_resource_t *", - "pe_node_t *") +PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *", + "pcmk_node_t *") static int resource_reasons_list_xml(pcmk__output_t *out, va_list args) { GList *resources = va_arg(args, GList *); - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); + pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); + pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *host_uname = (node == NULL)? NULL : node->details->uname; @@ -689,7 +694,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args) pcmk__output_xml_create_parent(out, "resources", NULL); for (lpc = resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; rsc->fns->location(rsc, &hosts, TRUE); @@ -723,7 +728,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args) pcmk__output_xml_create_parent(out, "resources", NULL); for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; pcmk__output_xml_create_parent(out, "resource", "id", rsc->id, @@ -736,7 +741,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args) } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *rsc = (pe_resource_t *) lpc->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; pcmk__output_xml_create_parent(out, "resource", "id", rsc->id, @@ -766,7 +771,8 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args) } static void -add_resource_name(pe_resource_t *rsc, pcmk__output_t *out) { +add_resource_name(pcmk_resource_t *rsc, pcmk__output_t *out) +{ if (rsc->children == NULL) { out->list_item(out, "resource", "%s", rsc->id); } else { diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index f25dbbc..da360fd 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -16,22 +16,22 @@ #include <crm/services_internal.h> static GList * -build_node_info_list(const pe_resource_t *rsc) +build_node_info_list(const pcmk_resource_t *rsc) { GList *retval = NULL; for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { - const pe_resource_t 
*child = (const pe_resource_t *) iter->data; + const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data; for (const GList *iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) { - const pe_node_t *node = (const pe_node_t *) iter2->data; + const pcmk_node_t *node = (const pcmk_node_t *) iter2->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; - ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) && - child->fns->state(child, TRUE) == RSC_ROLE_PROMOTED; + ni->promoted = pcmk_is_set(rsc->flags, pcmk_rsc_promotable) && + child->fns->state(child, TRUE) == pcmk_role_promoted; retval = g_list_prepend(retval, ni); } @@ -41,18 +41,18 @@ build_node_info_list(const pe_resource_t *rsc) } GList * -cli_resource_search(pe_resource_t *rsc, const char *requested_name, - pe_working_set_t *data_set) +cli_resource_search(pcmk_resource_t *rsc, const char *requested_name, + pcmk_scheduler_t *scheduler) { GList *retval = NULL; - const pe_resource_t *parent = pe__const_top_resource(rsc, false); + const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); if (pe_rsc_is_clone(rsc)) { retval = build_node_info_list(rsc); /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) - && !pcmk_is_set(rsc->flags, pe_rsc_unique) + && !pcmk_is_set(rsc->flags, pcmk_rsc_unique) && rsc->clone_name && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { @@ -61,10 +61,10 @@ cli_resource_search(pe_resource_t *rsc, const char *requested_name, } else if (rsc->running_on != NULL) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { - pe_node_t *node = (pe_node_t *) iter->data; + pcmk_node_t *node = (pcmk_node_t *) iter->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; - ni->promoted = (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED); + ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted); retval = g_list_prepend(retval, ni); } @@ -133,7 +133,7 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, } crm_log_xml_debug(xml_search, "Match"); - if (xml_has_children(xml_search)) { + if (xml_search->children != NULL) { xmlNode *child = NULL; rc = ENOTUNIQ; @@ -159,8 +159,9 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, /* PRIVATE. Use the find_matching_attr_resources instead. 
*/ static void -find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result, - pe_resource_t * rsc, const char * rsc_id, +find_matching_attr_resources_recursive(pcmk__output_t *out, + GList /* <pcmk_resource_t*> */ **result, + pcmk_resource_t *rsc, const char *rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, int depth) @@ -171,18 +172,19 @@ find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource /* visit the children */ for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { - find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data, + find_matching_attr_resources_recursive(out, result, + (pcmk_resource_t *) gIter->data, rsc_id, attr_set, attr_set_type, attr_id, attr_name, cib, cmd, depth+1); /* do it only once for clones */ - if(pe_clone == rsc->variant) { + if (rsc->variant == pcmk_rsc_variant_clone) { break; } } rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); - /* Post-order traversal. + /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ @@ -195,8 +197,8 @@ find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource /* The result is a linearized pre-ordered tree of resources. */ -static GList/*<pe_resource_t*>*/ * -find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, +static GList/*<pcmk_resource_t*>*/ * +find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, @@ -212,7 +214,8 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, if(force == TRUE) { return g_list_append(result, rsc); } - if(rsc->parent && pe_clone == rsc->parent->variant) { + if ((rsc->parent != NULL) + && (rsc->parent->variant == pcmk_rsc_variant_clone)) { int rc = pcmk_rc_ok; char *local_attr_id = NULL; rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type, @@ -225,10 +228,12 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, cmd, attr_name, rsc->id, rsc_id); } return g_list_append(result, rsc); - } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) { - pe_resource_t *child = rsc->children->data; - if(child->variant == pe_native) { + } else if ((rsc->parent == NULL) && (rsc->children != NULL) + && (rsc->variant == pcmk_rsc_variant_clone)) { + pcmk_resource_t *child = rsc->children->data; + + if (child->variant == pcmk_rsc_variant_primitive) { lookup_id = clone_strip(child->id); /* Could be a cloned group! 
*/ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); @@ -253,7 +258,7 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, // \return Standard Pacemaker return code int -cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, +cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, @@ -264,7 +269,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, char *found_attr_id = NULL; - GList/*<pe_resource_t*>*/ *resources = NULL; + GList/*<pcmk_resource_t*>*/ *resources = NULL; const char *top_id = pe__const_top_resource(rsc, false)->id; if ((attr_id == NULL) && !force) { @@ -333,7 +338,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, xmlNode *xml_obj = NULL; found_attr_id = NULL; - rsc = (pe_resource_t *) iter->data; + rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, @@ -358,7 +363,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, rsc_attr_id = found_attr_id; } - xml_top = create_xml_node(NULL, crm_element_name(rsc->xml)); + xml_top = create_xml_node(NULL, (const char *) rsc->xml->name); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); @@ -408,19 +413,19 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, need_init = false; pcmk__unpack_constraints(rsc->cluster); pe__clear_resource_flags_on_all(rsc->cluster, - pe_rsc_detect_loop); + pcmk_rsc_detect_loop); } /* We want to set the attribute only on resources explicitly * colocated with this one, so we use rsc->rsc_cons_lhs directly * rather than the with_this_colocations() method. 
*/ - pe__set_resource_flags(rsc, pe_rsc_detect_loop); + pe__set_resource_flags(rsc, pcmk_rsc_detect_loop); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; crm_debug("Checking %s %d", cons->id, cons->score); - if (!pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop) + if (!pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop) && (cons->score > 0)) { crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, cons->dependent->id); @@ -440,14 +445,14 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, // \return Standard Pacemaker return code int -cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, +cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, int cib_options, gboolean force) { pcmk__output_t *out = rsc->cluster->priv; int rc = pcmk_rc_ok; - GList/*<pe_resource_t*>*/ *resources = NULL; + GList/*<pcmk_resource_t*>*/ *resources = NULL; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, XML_ATTR_ID, @@ -482,7 +487,7 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; - rsc = (pe_resource_t *) iter->data; + rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, @@ -534,9 +539,10 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, - const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) + const char *host_uname, const char *rsc_id, + pcmk_scheduler_t *scheduler) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; const char *router_node = host_uname; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; @@ -544,13 +550,13 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, const char *rsc_provider = NULL; const char *rsc_type = NULL; bool cib_only = false; - pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); + pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id); if (rsc == NULL) { out->err(out, "Resource %s not found", rsc_id); return ENXIO; - } else if (rsc->variant != pe_native) { + } else if (rsc->variant != pcmk_rsc_variant_primitive) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } @@ -564,7 +570,7 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, } { - pe_node_t *node = pe_find_node(data_set->nodes, host_uname); + pcmk_node_t *node = pe_find_node(scheduler->nodes, host_uname); if (node == NULL) { out->err(out, "Node %s not found", host_uname); @@ -617,17 +623,20 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, * \note The caller is responsible for freeing the result. */ static inline char * -rsc_fail_name(const pe_resource_t *rsc) +rsc_fail_name(const pcmk_resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); - return pcmk_is_set(rsc->flags, pe_rsc_unique)? 
strdup(name) : clone_strip(name); + if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) { + return strdup(name); + } + return clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, - const char *rsc_id, pe_working_set_t *data_set) + const char *rsc_id, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; @@ -636,7 +645,7 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. */ - rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set); + rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler); if (rc != pcmk_rc_ok) { return rc; } @@ -654,7 +663,7 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *node_name, const char *rsc_id, const char *operation, - const char *interval_spec, pe_working_set_t *data_set) + const char *interval_spec, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; const char *failed_value = NULL; @@ -675,7 +684,7 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, crm_parse_interval_spec(interval_spec)); } - for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed); + for (xmlNode *xml_op = pcmk__xml_first_child(scheduler->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { @@ -687,10 +696,12 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, // No resource specified means all resources match if (rsc_id) { - pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources, - failed_id, - pe_find_renamed|pe_find_anon); + pcmk_resource_t *fail_rsc = NULL; + fail_rsc = pe_find_resource_with_flags(scheduler->resources, + failed_id, + pcmk_rsc_match_history + |pcmk_rsc_match_anon_basename); if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) { continue; } @@ -722,7 +733,7 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); - rc = clear_rsc_history(controld_api, node_name, failed_id, data_set); + rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler); if (rc != pcmk_rc_ok) { return rc; } @@ -733,8 +744,8 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, // \return Standard Pacemaker return code static int -clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation, - const char *interval_spec, const pe_node_t *node) +clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation, + const char *interval_spec, const pcmk_node_t *node) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; @@ -754,13 +765,13 @@ clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation, // \return Standard Pacemaker return code int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, - const pe_resource_t *rsc, const char *operation, + const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, - pe_working_set_t *data_set, gboolean force) + pcmk_scheduler_t *scheduler, gboolean force) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_ok; - pe_node_t *node = NULL; + 
pcmk_node_t *node = NULL; if (rsc == NULL) { return ENXIO; @@ -768,10 +779,11 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, } else if (rsc->children) { for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) { - const pe_resource_t *child = (const pe_resource_t *) lpc->data; + const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data; rc = cli_resource_delete(controld_api, host_uname, child, operation, - interval_spec, just_failures, data_set, force); + interval_spec, just_failures, scheduler, + force); if (rc != pcmk_rc_ok) { return rc; } @@ -783,11 +795,11 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, GList *nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && force) { - nodes = pcmk__copy_node_list(data_set->nodes, false); + nodes = pcmk__copy_node_list(scheduler->nodes, false); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; - pe_node_t *node = NULL; + pcmk_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { @@ -801,12 +813,12 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { - node = (pe_node_t *) lpc->data; + node = (pcmk_node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(controld_api, node->details->uname, rsc, operation, interval_spec, just_failures, - data_set, force); + scheduler, force); } if (rc != pcmk_rc_ok) { g_list_free(nodes); @@ -818,7 +830,7 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, return pcmk_rc_ok; } - node = pe_find_node(data_set->nodes, host_uname); + node = pe_find_node(scheduler->nodes, host_uname); if (node == NULL) { out->err(out, "Unable to clean up %s because node %s not found", @@ -847,13 +859,13 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, if (just_failures) { rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation, - interval_spec, data_set); + interval_spec, scheduler); } else { - rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set); + rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", - rsc->id, host_uname, pcmk_strerror(rc)); + rsc->id, host_uname, pcmk_rc_str(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, host_uname); } @@ -864,9 +876,9 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, - pe_working_set_t *data_set) + pcmk_scheduler_t *scheduler) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *display_name = node_name? 
node_name : "all nodes"; @@ -878,7 +890,7 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, } if (node_name) { - pe_node_t *node = pe_find_node(data_set->nodes, node_name); + pcmk_node_t *node = pe_find_node(scheduler->nodes, node_name); if (node == NULL) { out->err(out, "Unknown node: %s", node_name); @@ -899,21 +911,21 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, if (node_name) { rc = clear_rsc_failures(out, controld_api, node_name, NULL, - operation, interval_spec, data_set); + operation, interval_spec, scheduler); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s", - node_name, pcmk_strerror(rc)); + node_name, pcmk_rc_str(rc)); return rc; } } else { - for (GList *iter = data_set->nodes; iter; iter = iter->next) { - pe_node_t *node = (pe_node_t *) iter->data; + for (GList *iter = scheduler->nodes; iter; iter = iter->next) { + pcmk_node_t *node = (pcmk_node_t *) iter->data; rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, - operation, interval_spec, data_set); + operation, interval_spec, scheduler); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s", - pcmk_strerror(rc)); + pcmk_rc_str(rc)); return rc; } } @@ -933,13 +945,13 @@ check_role(resource_checks_t *checks) return; } switch (text2role(role_s)) { - case RSC_ROLE_STOPPED: + case pcmk_role_stopped: checks->flags |= rsc_remain_stopped; break; - case RSC_ROLE_UNPROMOTED: + case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags, - pe_rsc_promotable)) { + pcmk_rsc_promotable)) { checks->flags |= rsc_unpromotable; } break; @@ -970,7 +982,7 @@ check_locked(resource_checks_t *checks) } static bool -node_is_unhealthy(pe_node_t *node) +node_is_unhealthy(pcmk_node_t *node) { switch (pe__health_strategy(node->details->data_set)) { case pcmk__health_strategy_none: @@ -1000,7 +1012,7 @@ node_is_unhealthy(pe_node_t *node) } static void -check_node_health(resource_checks_t *checks, pe_node_t *node) +check_node_health(resource_checks_t *checks, pcmk_node_t *node) { if (node == NULL) { GHashTableIter iter; @@ -1025,7 +1037,7 @@ check_node_health(resource_checks_t *checks, pe_node_t *node) } int -cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) +cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { resource_checks_t checks = { .rsc = rsc }; @@ -1040,15 +1052,15 @@ cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) // \return Standard Pacemaker return code int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, - const char *rsc_id, pe_working_set_t *data_set) + const char *rsc_id, pcmk_scheduler_t *scheduler) { crm_notice("Failing %s on %s", rsc_id, host_uname); - return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set); + return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler); } static GHashTable * -generate_resource_params(pe_resource_t *rsc, pe_node_t *node, - pe_working_set_t *data_set) +generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node, + pcmk_scheduler_t *scheduler) { GHashTable *params = NULL; GHashTable *meta = NULL; @@ -1059,7 +1071,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node, combined = pcmk__strkey_table(free, free); - params = pe_rsc_params(rsc, node, data_set); + params = pe_rsc_params(rsc, node, scheduler); if (params != NULL) { 
g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { @@ -1068,7 +1080,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node, } meta = pcmk__strkey_table(free, free); - get_meta_attributes(meta, rsc, node, data_set); + get_meta_attributes(meta, rsc, node, scheduler); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { @@ -1082,7 +1094,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node, return combined; } -bool resource_is_running_on(pe_resource_t *rsc, const char *host) +bool resource_is_running_on(pcmk_resource_t *rsc, const char *host) { bool found = true; GList *hIter = NULL; @@ -1094,7 +1106,7 @@ bool resource_is_running_on(pe_resource_t *rsc, const char *host) rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { - pe_node_t *node = (pe_node_t *) hIter->data; + pcmk_node_t *node = (pcmk_node_t *) hIter->data; if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) { crm_trace("Resource %s is running on %s\n", rsc->id, host); @@ -1132,13 +1144,13 @@ get_active_resources(const char *host, GList *rsc_list) GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { - pe_resource_t *rsc = (pe_resource_t *) rIter->data; + pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ - if (rsc->variant == pe_group) { + if (rsc->variant == pcmk_rsc_variant_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { @@ -1148,7 +1160,7 @@ get_active_resources(const char *host, GList *rsc_list) return active; } -static void dump_list(GList *items, const char *tag) +static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; @@ -1170,45 +1182,45 @@ static void display_list(pcmk__output_t *out, GList *items, const char *tag) /*! * \internal - * \brief Upgrade XML to latest schema version and use it as working set input + * \brief Upgrade XML to latest schema version and use it as scheduler input * - * This also updates the working set timestamp to the current time. + * This also updates the scheduler timestamp to the current time. * - * \param[in,out] data_set Working set instance to update - * \param[in,out] xml XML to use as input + * \param[in,out] scheduler Scheduler data to update + * \param[in,out] xml XML to use as input * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for - * data_set->now. + * scheduler->now. * \todo This follows the example of other callers of cli_config_update() * and returns ENOKEY ("Required key not available") if that fails, * but perhaps pcmk_rc_schema_validation would be better in that case. */ int -update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) +update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return ENOKEY; } - data_set->input = *xml; - data_set->now = crm_time_new(NULL); + scheduler->input = *xml; + scheduler->now = crm_time_new(NULL); return pcmk_rc_ok; } /*! 
* \internal - * \brief Update a working set's XML input based on a CIB query + * \brief Update scheduler XML input based on a CIB query * - * \param[in] data_set Data set instance to initialize + * \param[in] scheduler Scheduler data to initialize * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for - * data_set->input and data_set->now. + * scheduler->input and scheduler->now. */ static int -update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, - cib_t *cib) +update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler, + cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc = pcmk_rc_ok; @@ -1217,10 +1229,10 @@ update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { - out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc); + out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc); return rc; } - rc = update_working_set_xml(data_set, &cib_xml_copy); + rc = update_scheduler_input(scheduler, &cib_xml_copy); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML"); free_xml(cib_xml_copy); @@ -1232,18 +1244,19 @@ update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, // \return Standard Pacemaker return code static int -update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) +update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; - pe_reset_working_set(data_set); - pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); - rc = update_working_set_from_cib(out, data_set, cib); + pe_reset_working_set(scheduler); + pe__set_working_set_flags(scheduler, + pcmk_sched_no_counts|pcmk_sched_no_compat); + rc = update_scheduler_input_to_cib(out, scheduler, cib); if (rc != pcmk_rc_ok) { return rc; } @@ -1261,7 +1274,7 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) goto done; } - rc = write_xml_file(data_set->input, shadow_file, FALSE); + rc = write_xml_file(scheduler->input, shadow_file, FALSE); if (rc < 0) { out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc); @@ -1272,26 +1285,27 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { - out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc); + out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_rc_str(rc), rc); goto done; } - pcmk__schedule_actions(data_set->input, - pe_flag_no_counts|pe_flag_no_compat, data_set); + pcmk__schedule_actions(scheduler->input, + pcmk_sched_no_counts|pcmk_sched_no_compat, + scheduler); prev_quiet = out->is_quiet(out); out->quiet = true; - pcmk__simulate_transition(data_set, shadow_cib, NULL); + pcmk__simulate_transition(scheduler, shadow_cib, NULL); out->quiet = prev_quiet; - rc = update_dataset(shadow_cib, data_set, false); + rc = update_dataset(shadow_cib, scheduler, false); } else { - cluster_status(data_set); + cluster_status(scheduler); } done: - /* Do not free data_set->input here, we need rsc->xml to be valid later on */ + // Do not free scheduler->input here, we need rsc->xml to be valid later on cib_delete(shadow_cib); free(pid); 
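[Reviewer note] The crm_resource_runtime.c hunks above are dominated by the pe_working_set_t -> pcmk_scheduler_t rename; the allocate/populate/tear-down pattern the callers use is unchanged. The sketch below condenses that pattern from the surrounding hunks and is illustrative only: example_load_scheduler() is a hypothetical name, error handling is trimmed, and update_scheduler_input_to_cib() is the static helper renamed earlier in this file.

    /* Hypothetical helper (illustration only): allocate scheduler data,
     * populate it from the CIB, and unpack the cluster status, following
     * the calling pattern shown in the hunks above.
     */
    static int
    example_load_scheduler(pcmk__output_t *out, cib_t *cib,
                           pcmk_scheduler_t **scheduler)
    {
        int rc = pcmk_rc_ok;

        *scheduler = pe_new_working_set();      /* allocator keeps its old name */
        if (*scheduler == NULL) {
            return errno;
        }
        (*scheduler)->priv = out;               /* output object for messages */
        pe__set_working_set_flags(*scheduler,
                                  pcmk_sched_no_counts|pcmk_sched_no_compat);

        rc = update_scheduler_input_to_cib(out, *scheduler, cib);
        if (rc == pcmk_rc_ok) {
            cluster_status(*scheduler);         /* unpack nodes and resources */
        }
        return rc;
    }

Callers would pair this with pe_free_working_set() once the scheduler data is no longer needed, mirroring the done: cleanup blocks in the hunks above and below.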
@@ -1303,64 +1317,96 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) return rc; } +/*! + * \internal + * \brief Find the maximum stop timeout of a resource and its children (if any) + * + * \param[in,out] rsc Resource to get timeout for + * + * \return Maximum stop timeout for \p rsc (in milliseconds) + */ static int -max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc) +max_rsc_stop_timeout(pcmk_resource_t *rsc) { - int delay = 0; + long long result_ll; int max_delay = 0; + xmlNode *config = NULL; + GHashTable *meta = NULL; - if(rsc && rsc->children) { - GList *iter = NULL; + if (rsc == NULL) { + return 0; + } - for(iter = rsc->children; iter; iter = iter->next) { - pe_resource_t *child = (pe_resource_t *)iter->data; + // If resource is collective, use maximum of its children's stop timeouts + if (rsc->children != NULL) { + for (GList *iter = rsc->children; iter; iter = iter->next) { + pcmk_resource_t *child = iter->data; + int delay = max_rsc_stop_timeout(child); - delay = max_delay_for_resource(data_set, child); - if(delay > max_delay) { - double seconds = delay / 1000.0; - crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id); + if (delay > max_delay) { + pe_rsc_trace(rsc, + "Maximum stop timeout for %s is now %s due to %s", + rsc->id, pcmk__readable_interval(delay), child->id); max_delay = delay; } } + return max_delay; + } - } else if(rsc) { - char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP); - pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set); - const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT); - long long result_ll; + // Get resource's stop action configuration from CIB + config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true); - if ((pcmk__scan_ll(value, &result_ll, -1LL) == pcmk_rc_ok) - && (result_ll >= 0) && (result_ll <= INT_MAX)) { - max_delay = (int) result_ll; - } else { - max_delay = -1; - } - pe_free_action(stop); + /* Get configured timeout for stop action (fully evaluated for rules, + * defaults, etc.). + * + * @TODO This currently ignores node (which might matter for rules) + */ + meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config); + if ((pcmk__scan_ll(g_hash_table_lookup(meta, XML_ATTR_TIMEOUT), + &result_ll, -1LL) == pcmk_rc_ok) + && (result_ll >= 0) && (result_ll <= INT_MAX)) { + max_delay = (int) result_ll; } + g_hash_table_destroy(meta); return max_delay; } +/*! + * \internal + * \brief Find a reasonable waiting time for stopping any one resource in a list + * + * \param[in,out] scheduler Scheduler data + * \param[in] resources List of names of resources that will be stopped + * + * \return Rough estimate of a reasonable time to wait (in seconds) to stop any + * one resource in \p resources + * \note This estimate is very rough, simply the maximum stop timeout of all + * given resources and their children, plus a small fudge factor. It does + * not account for children that must be stopped in sequence, action + * throttling, or any demotions needed. It checks the stop timeout, even + * if the resources in question are actually being started. 
+ */ static int -max_delay_in(pe_working_set_t * data_set, GList *resources) +wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources) { int max_delay = 0; - GList *item = NULL; - - for (item = resources; item != NULL; item = item->next) { - int delay = 0; - pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data); - delay = max_delay_for_resource(data_set, rsc); + // Find maximum stop timeout in milliseconds + for (const GList *item = resources; item != NULL; item = item->next) { + pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, + (const char *) item->data); + int delay = max_rsc_stop_timeout(rsc); - if(delay > max_delay) { - double seconds = delay / 1000.0; - crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id); + if (delay > max_delay) { + pe_rsc_trace(rsc, + "Wait time is now %s due to %s", + pcmk__readable_interval(delay), rsc->id); max_delay = delay; } } - return 5 + (max_delay / 1000); + return (max_delay / 1000) + 5; } #define waiting_for_starts(d, r, h) ((d != NULL) || \ @@ -1390,8 +1436,8 @@ max_delay_in(pe_working_set_t * data_set, GList *resources) * \return Standard Pacemaker return code (exits on certain failures) */ int -cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, - const pe_node_t *node, const char *move_lifetime, +cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, + const pcmk_node_t *node, const char *move_lifetime, int timeout_ms, cib_t *cib, int cib_options, gboolean promoted_role_only, gboolean force) { @@ -1412,8 +1458,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, GList *current_active = NULL; GList *restart_target_active = NULL; - pe_working_set_t *data_set = NULL; - pe_resource_t *parent = uber_parent(rsc); + pcmk_scheduler_t *scheduler = NULL; + pcmk_resource_t *parent = uber_parent(rsc); bool running = false; const char *id = rsc->clone_name ? rsc->clone_name : rsc->id; @@ -1435,7 +1481,9 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, lookup_id = clone_strip(rsc->id); } - rsc = parent->fns->find_rsc(parent, lookup_id, node, pe_find_any|pe_find_current); + rsc = parent->fns->find_rsc(parent, lookup_id, node, + pcmk_rsc_match_basename + |pcmk_rsc_match_current_node); free(lookup_id); running = resource_is_running_on(rsc, host); } @@ -1449,6 +1497,11 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, return ENXIO; } + if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { + out->err(out, "Unmanaged resources cannot be restarted."); + return EAGAIN; + } + rsc_id = strdup(rsc->id); if (pe_rsc_is_unique_clone(parent)) { @@ -1485,32 +1538,32 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, - Allow a --no-deps option (aka. 
--force-restart) */ - data_set = pe_new_working_set(); - if (data_set == NULL) { - crm_perror(LOG_ERR, "Could not allocate working set"); - rc = ENOMEM; + scheduler = pe_new_working_set(); + if (scheduler == NULL) { + rc = errno; + out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc)); goto done; } - data_set->priv = out; - rc = update_dataset(cib, data_set, false); + scheduler->priv = out; + rc = update_dataset(cib, scheduler, false); if(rc != pcmk_rc_ok) { - out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc); + out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc); goto done; } - restart_target_active = get_active_resources(host, data_set->resources); - current_active = get_active_resources(host, data_set->resources); + restart_target_active = get_active_resources(host, scheduler->resources); + current_active = get_active_resources(host, scheduler->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ out->quiet = true; - rc = cli_resource_ban(out, lookup_id, host, move_lifetime, NULL, cib, - cib_options, promoted_role_only); - + rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib, + cib_options, promoted_role_only, + PCMK__ROLE_PROMOTED); } else { /* Stop the resource by setting target-role to Stopped. * Remember any existing target-role so we can restore it later @@ -1521,11 +1574,11 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, - RSC_STOPPED, FALSE, cib, cib_options, - force); + PCMK_ACTION_STOPPED, FALSE, cib, + cib_options, force); } if(rc != pcmk_rc_ok) { - out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); + out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; @@ -1537,13 +1590,13 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, goto done; } - rc = update_dataset(cib, data_set, true); + rc = update_dataset(cib, scheduler, true); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources would be stopped"); goto failure; } - target_active = get_active_resources(host, data_set->resources); + target_active = get_active_resources(host, scheduler->resources); dump_list(target_active, "Target"); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); @@ -1554,7 +1607,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, while (list_delta != NULL) { before = g_list_length(list_delta); if(timeout_ms == 0) { - step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval; + step_timeout_s = wait_time_estimate(scheduler, list_delta) + / sleep_interval; } /* We probably don't need the entire step timeout */ @@ -1564,7 +1618,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } - rc = update_dataset(cib, data_set, FALSE); + rc = update_dataset(cib, scheduler, FALSE); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were stopped"); goto failure; @@ -1572,12 +1626,12 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, if (current_active != NULL) { g_list_free_full(current_active, free); - 
current_active = NULL; } - current_active = get_active_resources(host, data_set->resources); + current_active = get_active_resources(host, scheduler->resources); + g_list_free(list_delta); - list_delta = NULL; list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); + dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } @@ -1610,15 +1664,15 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, } if(rc != pcmk_rc_ok) { - out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); + out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); goto done; } if (target_active != NULL) { g_list_free_full(target_active, free); - target_active = NULL; } target_active = restart_target_active; + list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); display_list(out, list_delta, " * "); @@ -1627,7 +1681,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, while (waiting_for_starts(list_delta, rsc, host)) { before = g_list_length(list_delta); if(timeout_ms == 0) { - step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval; + step_timeout_s = wait_time_estimate(scheduler, list_delta) + / sleep_interval; } /* We probably don't need the entire step timeout */ @@ -1639,21 +1694,20 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, crm_trace("%ds remaining", timeout); } - rc = update_dataset(cib, data_set, false); + rc = update_dataset(cib, scheduler, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were started"); goto failure; } + /* It's OK if dependent resources moved to a different node, + * so we check active resources on all nodes. + */ if (current_active != NULL) { g_list_free_full(current_active, free); - current_active = NULL; } + current_active = get_active_resources(NULL, scheduler->resources); - /* It's OK if dependent resources moved to a different node, - * so we check active resources on all nodes. 
- */ - current_active = get_active_resources(NULL, data_set->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); @@ -1702,16 +1756,17 @@ done: } free(rsc_id); free(lookup_id); - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return rc; } static inline bool -action_is_pending(const pe_action_t *action) +action_is_pending(const pcmk_action_t *action) { - if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo) - || !pcmk_is_set(action->flags, pe_action_runnable) - || pcmk__str_eq("notify", action->task, pcmk__str_casei)) { + if (pcmk_any_flags_set(action->flags, + pcmk_action_optional|pcmk_action_pseudo) + || !pcmk_is_set(action->flags, pcmk_action_runnable) + || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) { return false; } return true; @@ -1729,7 +1784,7 @@ static bool actions_are_pending(const GList *actions) { for (const GList *action = actions; action != NULL; action = action->next) { - const pe_action_t *a = (const pe_action_t *) action->data; + const pcmk_action_t *a = (const pcmk_action_t *) action->data; if (action_is_pending(a)) { crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags); @@ -1746,7 +1801,7 @@ print_pending_actions(pcmk__output_t *out, GList *actions) out->info(out, "Pending actions:"); for (action = actions; action != NULL; action = action->next) { - pe_action_t *a = (pe_action_t *) action->data; + pcmk_action_t *a = (pcmk_action_t *) action->data; if (!action_is_pending(a)) { continue; @@ -1786,27 +1841,28 @@ print_pending_actions(pcmk__output_t *out, GList *actions) int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) { - pe_working_set_t *data_set = NULL; + pcmk_scheduler_t *scheduler = NULL; + xmlXPathObjectPtr search; int rc = pcmk_rc_ok; + bool pending_unknown_state_resources; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; bool printed_version_warning = out->is_quiet(out); // i.e. 
don't print if quiet - data_set = pe_new_working_set(); - if (data_set == NULL) { + scheduler = pe_new_working_set(); + if (scheduler == NULL) { return ENOMEM; } do { - /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff); } else { - print_pending_actions(out, data_set->actions); - pe_free_working_set(data_set); + print_pending_actions(out, scheduler->actions); + pe_free_working_set(scheduler); return ETIME; } if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */ @@ -1814,14 +1870,15 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) } /* Get latest transition graph */ - pe_reset_working_set(data_set); - rc = update_working_set_from_cib(out, data_set, cib); + pe_reset_working_set(scheduler); + rc = update_scheduler_input_to_cib(out, scheduler, cib); if (rc != pcmk_rc_ok) { - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return rc; } - pcmk__schedule_actions(data_set->input, - pe_flag_no_counts|pe_flag_no_compat, data_set); + pcmk__schedule_actions(scheduler->input, + pcmk_sched_no_counts|pcmk_sched_no_compat, + scheduler); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two @@ -1832,7 +1889,7 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. */ - const char *dc_version = g_hash_table_lookup(data_set->config_hash, + const char *dc_version = g_hash_table_lookup(scheduler->config_hash, "dc-version"); if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) { @@ -1842,9 +1899,13 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) } } - } while (actions_are_pending(data_set->actions)); + search = xpath_search(scheduler->input, "/cib/status/node_state/lrm/lrm_resources/lrm_resource/" + XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_RC "='193']"); + pending_unknown_state_resources = (numXpathResults(search) > 0); + freeXpathObject(search); + } while (actions_are_pending(scheduler->actions) || pending_unknown_state_resources); - pe_free_working_set(data_set); + pe_free_working_set(scheduler); return rc; } @@ -1853,10 +1914,10 @@ get_action(const char *rsc_action) { const char *action = NULL; if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { - action = "validate-all"; + action = PCMK_ACTION_VALIDATE_ALL; } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { - action = "monitor"; + action = PCMK_ACTION_MONITOR; } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop", "force-demote", "force-promote", NULL)) { @@ -1898,7 +1959,7 @@ set_agent_environment(GHashTable *params, int timeout_ms, int check_level, free(level); } - setenv("HA_debug", (verbosity > 0)? "1" : "0", 1); + pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? 
"1" : "0"), true); if (verbosity > 1) { setenv("OCF_TRACE_RA", "1", 1); } @@ -1948,7 +2009,7 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, // If no timeout was provided, use the same default as the cluster if (timeout_ms == 0) { - timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); + timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } set_agent_environment(params, timeout_ms, check_level, resource_verbose); @@ -2000,12 +2061,12 @@ done: } crm_exit_t -cli_resource_execute(pe_resource_t *rsc, const char *requested_name, +cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, - int timeout_ms, cib_t * cib, pe_working_set_t *data_set, + int timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler, int resource_verbose, gboolean force, int check_level) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; crm_exit_t exit_code = CRM_EX_OK; const char *rid = NULL; const char *rtype = NULL; @@ -2016,7 +2077,7 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote", "force-promote", NULL)) { if(pe_rsc_is_clone(rsc)) { - GList *nodes = cli_resource_search(rsc, requested_name, data_set); + GList *nodes = cli_resource_search(rsc, requested_name, scheduler); if(nodes != NULL && force == FALSE) { out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", rsc_action, rsc->id); @@ -2034,10 +2095,10 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, rsc = rsc->children->data; } - if(rsc->variant == pe_group) { + if (rsc->variant == pcmk_rsc_variant_group) { out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action); return CRM_EX_UNIMPLEMENT_FEATURE; - } else if (rsc->variant == pe_container || pe_rsc_is_bundled(rsc)) { + } else if (pe_rsc_is_bundled(rsc)) { out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action); return CRM_EX_UNIMPLEMENT_FEATURE; } @@ -2047,10 +2108,11 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); params = generate_resource_params(rsc, NULL /* @TODO use local node */, - data_set); + scheduler); if (timeout_ms == 0) { - timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), data_set); + timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), + scheduler); } rid = pe_rsc_is_anon_clone(rsc->parent)? 
requested_name : rsc->id; @@ -2063,26 +2125,28 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name, // \return Standard Pacemaker return code int -cli_resource_move(const pe_resource_t *rsc, const char *rsc_id, +cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id, const char *host_name, const char *move_lifetime, cib_t *cib, - int cib_options, pe_working_set_t *data_set, + int cib_options, pcmk_scheduler_t *scheduler, gboolean promoted_role_only, gboolean force) { - pcmk__output_t *out = data_set->priv; + pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_ok; unsigned int count = 0; - pe_node_t *current = NULL; - pe_node_t *dest = pe_find_node(data_set->nodes, host_name); + pcmk_node_t *current = NULL; + pcmk_node_t *dest = pe_find_node(scheduler->nodes, host_name); bool cur_is_dest = false; if (dest == NULL) { return pcmk_rc_node_unknown; } - if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) { - const pe_resource_t *p = pe__const_top_resource(rsc, false); + if (promoted_role_only + && !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) { - if (pcmk_is_set(p->flags, pe_rsc_promotable)) { + const pcmk_resource_t *p = pe__const_top_resource(rsc, false); + + if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) { out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); rsc_id = p->id; rsc = p; @@ -2096,15 +2160,15 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id, current = pe__find_active_requires(rsc, &count); - if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { + if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) { unsigned int promoted_count = 0; - pe_node_t *promoted_node = NULL; + pcmk_node_t *promoted_node = NULL; for (const GList *iter = rsc->children; iter; iter = iter->next) { - const pe_resource_t *child = (const pe_resource_t *) iter->data; + const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); - if (child_role == RSC_ROLE_PROMOTED) { + if (child_role == pcmk_role_promoted) { rsc = child; promoted_node = pe__current_node(child); promoted_count++; @@ -2137,15 +2201,17 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id, } /* Clear any previous prefer constraints across all nodes. */ - cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, false, force); + cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false, + force); /* Clear any previous ban constraints on 'dest'. */ - cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib, + cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib, cib_options, TRUE, force); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime, - cib, cib_options, promoted_role_only); + cib, cib_options, promoted_role_only, + PCMK__ROLE_PROMOTED); crm_trace("%s%s now prefers %s%s", rsc->id, (promoted_role_only? " (promoted)" : ""), @@ -2158,8 +2224,8 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id, /* Ban the original location if possible */ if(current) { (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime, - NULL, cib, cib_options, promoted_role_only); - + cib, cib_options, promoted_role_only, + PCMK__ROLE_PROMOTED); } else if(count > 1) { out->info(out, "Resource '%s' is currently %s in %d locations. 
" "One may now move to %s", diff --git a/tools/crm_shadow.c b/tools/crm_shadow.c index ef69502..b86f462 100644 --- a/tools/crm_shadow.c +++ b/tools/crm_shadow.c @@ -147,15 +147,15 @@ instruction_xml(pcmk__output_t *out, va_list args) * -# Patchset containing the changes in the shadow CIB (can be \p NULL) * -# Group of \p shadow_disp_flags indicating which fields to display */ -PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr", - "xmlNodePtr", "enum shadow_disp_flags") +PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *", + "const xmlNode *", "enum shadow_disp_flags") static int shadow_default(pcmk__output_t *out, va_list args) { const char *instance = va_arg(args, const char *); const char *filename = va_arg(args, const char *); - xmlNodePtr content = va_arg(args, xmlNodePtr); - xmlNodePtr diff = va_arg(args, xmlNodePtr); + const xmlNode *content = va_arg(args, const xmlNode *); + const xmlNode *diff = va_arg(args, const xmlNode *); enum shadow_disp_flags flags = (enum shadow_disp_flags) va_arg(args, int); int rc = pcmk_rc_no_output; @@ -210,8 +210,8 @@ shadow_default(pcmk__output_t *out, va_list args) * -# Patchset containing the changes in the shadow CIB (can be \p NULL) * -# Group of \p shadow_disp_flags indicating which fields to display */ -PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr", - "xmlNodePtr", "enum shadow_disp_flags") +PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *", + "const xmlNode *", "enum shadow_disp_flags") static int shadow_text(pcmk__output_t *out, va_list args) { @@ -221,8 +221,8 @@ shadow_text(pcmk__output_t *out, va_list args) } else { const char *instance = va_arg(args, const char *); const char *filename = va_arg(args, const char *); - xmlNodePtr content = va_arg(args, xmlNodePtr); - xmlNodePtr diff = va_arg(args, xmlNodePtr); + const xmlNode *content = va_arg(args, const xmlNode *); + const xmlNode *diff = va_arg(args, const xmlNode *); enum shadow_disp_flags flags = (enum shadow_disp_flags) va_arg(args, int); int rc = pcmk_rc_no_output; @@ -271,15 +271,15 @@ shadow_text(pcmk__output_t *out, va_list args) * -# Group of \p shadow_disp_flags indicating which fields to display * (ignored) */ -PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr", - "xmlNodePtr", "enum shadow_disp_flags") +PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *", + "const xmlNode *", "enum shadow_disp_flags") static int shadow_xml(pcmk__output_t *out, va_list args) { const char *instance = va_arg(args, const char *); const char *filename = va_arg(args, const char *); - xmlNodePtr content = va_arg(args, xmlNodePtr); - xmlNodePtr diff = va_arg(args, xmlNodePtr); + const xmlNode *content = va_arg(args, const xmlNode *); + const xmlNode *diff = va_arg(args, const xmlNode *); enum shadow_disp_flags flags G_GNUC_UNUSED = (enum shadow_disp_flags) va_arg(args, int); @@ -512,13 +512,13 @@ read_xml(const char *filename, xmlNode **output, GError **error) * \internal * \brief Write the shadow XML to a file * - * \param[in,out] xml Shadow XML - * \param[in] filename Name of destination file - * \param[in] reset Whether the write is a reset (for logging only) - * \param[out] error Where to store error + * \param[in] xml Shadow XML + * \param[in] filename Name of destination file + * \param[in] reset Whether the write is a reset (for logging only) + * \param[out] error Where to store error */ static int -write_shadow_file(xmlNode *xml, const char 
*filename, bool reset, +write_shadow_file(const xmlNode *xml, const char *filename, bool reset, GError **error) { int rc = write_xml_file(xml, filename, FALSE); @@ -927,9 +927,7 @@ show_shadow_diff(pcmk__output_t *out, GError **error) xmlNodePtr old_config = NULL; xmlNodePtr new_config = NULL; xmlNodePtr diff = NULL; - pcmk__output_t *logger_out = NULL; bool quiet_orig = out->quiet; - int rc = pcmk_rc_ok; if (get_instance_from_env(error) != pcmk_rc_ok) { return; @@ -951,18 +949,7 @@ show_shadow_diff(pcmk__output_t *out, GError **error) xml_calculate_changes(old_config, new_config); diff = xml_create_patchset(0, old_config, new_config, NULL, false); - rc = pcmk__log_output_new(&logger_out); - if (rc != pcmk_rc_ok) { - exit_code = pcmk_rc2exitc(rc); - g_set_error(error, PCMK__EXITC_ERROR, exit_code, - "Could not create logger object: %s", pcmk_rc_str(rc)); - goto done; - } - pcmk__output_set_log_level(logger_out, LOG_INFO); - rc = pcmk__xml_show_changes(logger_out, new_config); - logger_out->finish(logger_out, pcmk_rc2exitc(rc), true, NULL); - pcmk__output_free(logger_out); - + pcmk__log_xml_changes(LOG_INFO, new_config); xml_accept_changes(new_config); out->quiet = true; diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c index 932c5bd..aab4110 100644 --- a/tools/crm_simulate.c +++ b/tools/crm_simulate.c @@ -29,6 +29,7 @@ #include <crm/common/util.h> #include <crm/common/iso8601.h> #include <crm/pengine/status.h> +#include <crm/pengine/internal.h> #include <pacemaker-internal.h> #include <pacemaker.h> @@ -450,7 +451,7 @@ int main(int argc, char **argv) { int rc = pcmk_rc_ok; - pe_working_set_t *data_set = NULL; + pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; GError *error = NULL; @@ -513,24 +514,26 @@ main(int argc, char **argv) #endif } - data_set = pe_new_working_set(); - if (data_set == NULL) { + scheduler = pe_new_working_set(); + if (scheduler == NULL) { rc = ENOMEM; - g_set_error(&error, PCMK__RC_ERROR, rc, "Could not allocate working set"); + g_set_error(&error, PCMK__RC_ERROR, rc, + "Could not allocate scheduler data"); goto done; } if (pcmk_is_set(options.flags, pcmk_sim_show_scores)) { - pe__set_working_set_flags(data_set, pe_flag_show_scores); + pe__set_working_set_flags(scheduler, pcmk_sched_output_scores); } if (pcmk_is_set(options.flags, pcmk_sim_show_utilization)) { - pe__set_working_set_flags(data_set, pe_flag_show_utilization); + pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization); } - pe__set_working_set_flags(data_set, pe_flag_no_compat); + pe__set_working_set_flags(scheduler, pcmk_sched_no_compat); if (options.test_dir != NULL) { - data_set->priv = out; - pcmk__profile_dir(options.test_dir, options.repeat, data_set, options.use_date); + scheduler->priv = out; + pcmk__profile_dir(options.test_dir, options.repeat, scheduler, + options.use_date); rc = pcmk_rc_ok; goto done; } @@ -542,9 +545,9 @@ main(int argc, char **argv) goto done; } - rc = pcmk__simulate(data_set, out, options.injections, options.flags, section_opts, - options.use_date, options.input_file, options.graph_file, - options.dot_file); + rc = pcmk__simulate(scheduler, out, options.injections, options.flags, + section_opts, options.use_date, options.input_file, + options.graph_file, options.dot_file); done: pcmk__output_and_clear_error(&error, NULL); @@ -562,8 +565,8 @@ main(int argc, char **argv) pcmk__free_arg_context(context); g_strfreev(processed_args); - if (data_set) { - pe_free_working_set(data_set); + if (scheduler != NULL) { + pe_free_working_set(scheduler); } 
fflush(stderr); diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c index c451e8a..d95b581 100644 --- a/tools/crm_ticket.c +++ b/tools/crm_ticket.c @@ -31,6 +31,7 @@ #include <crm/cib/internal.h> #include <crm/pengine/rules.h> #include <crm/pengine/status.h> +#include <crm/pengine/internal.h> #include <pacemaker-internal.h> @@ -253,10 +254,10 @@ static GOptionEntry deprecated_entries[] = { { NULL } }; -static pe_ticket_t * -find_ticket(gchar *ticket_id, pe_working_set_t * data_set) +static pcmk_ticket_t * +find_ticket(gchar *ticket_id, pcmk_scheduler_t *scheduler) { - return g_hash_table_lookup(data_set->tickets, ticket_id); + return g_hash_table_lookup(scheduler->tickets, ticket_id); } static void @@ -275,7 +276,7 @@ print_date(time_t time) } static void -print_ticket(pe_ticket_t * ticket, bool raw, bool details) +print_ticket(pcmk_ticket_t *ticket, bool raw, bool details) { if (raw) { fprintf(stdout, "%s\n", ticket->id); @@ -325,12 +326,12 @@ print_ticket(pe_ticket_t * ticket, bool raw, bool details) } static void -print_ticket_list(pe_working_set_t * data_set, bool raw, bool details) +print_ticket_list(pcmk_scheduler_t *scheduler, bool raw, bool details) { GHashTableIter iter; - pe_ticket_t *ticket = NULL; + pcmk_ticket_t *ticket = NULL; - g_hash_table_iter_init(&iter, data_set->tickets); + g_hash_table_iter_init(&iter, scheduler->tickets); while (g_hash_table_iter_next(&iter, NULL, (void **)&ticket)) { print_ticket(ticket, raw, details); @@ -369,7 +370,7 @@ find_ticket_state(cib_t * the_cib, gchar *ticket_id, xmlNode ** ticket_state_xml } crm_log_xml_debug(xml_search, "Match"); - if (xml_has_children(xml_search)) { + if (xml_search->children != NULL) { if (ticket_id) { fprintf(stdout, "Multiple ticket_states match ticket_id=%s\n", ticket_id); } @@ -439,7 +440,7 @@ dump_ticket_xml(cib_t * the_cib, gchar *ticket_id) char *state_xml_str = NULL; state_xml_str = dump_xml_formatted(state_xml); - fprintf(stdout, "\n%s", pcmk__s(state_xml_str, "<null>\n")); + fprintf(stdout, "\n%s", state_xml_str); free_xml(state_xml); free(state_xml_str); } @@ -461,8 +462,7 @@ dump_constraints(cib_t * the_cib, gchar *ticket_id) } cons_xml_str = dump_xml_formatted(cons_xml); - fprintf(stdout, "Constraints XML:\n\n%s", - pcmk__s(cons_xml_str, "<null>\n")); + fprintf(stdout, "Constraints XML:\n\n%s", cons_xml_str); free_xml(cons_xml); free(cons_xml_str); @@ -471,14 +471,14 @@ dump_constraints(cib_t * the_cib, gchar *ticket_id) static int get_ticket_state_attr(gchar *ticket_id, const char *attr_name, const char **attr_value, - pe_working_set_t * data_set) + pcmk_scheduler_t *scheduler) { - pe_ticket_t *ticket = NULL; + pcmk_ticket_t *ticket = NULL; CRM_ASSERT(attr_value != NULL); *attr_value = NULL; - ticket = g_hash_table_lookup(data_set->tickets, ticket_id); + ticket = g_hash_table_lookup(scheduler->tickets, ticket_id); if (ticket == NULL) { return ENXIO; } @@ -564,7 +564,7 @@ allow_modification(gchar *ticket_id) } static int -modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set) +modify_ticket_state(gchar *ticket_id, cib_t *cib, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; xmlNode *xml_top = NULL; @@ -577,7 +577,7 @@ modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set) char *key = NULL; char *value = NULL; - pe_ticket_t *ticket = NULL; + pcmk_ticket_t *ticket = NULL; rc = find_ticket_state(cib, ticket_id, &ticket_state_xml); if (rc == pcmk_rc_ok) { @@ -605,7 +605,7 @@ modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set) 
xml_remove_prop(ticket_state_xml, key); } - ticket = find_ticket(ticket_id, data_set); + ticket = find_ticket(ticket_id, scheduler); g_hash_table_iter_init(&hash_iter, attr_set); while (g_hash_table_iter_next(&hash_iter, (gpointer *) & key, (gpointer *) & value)) { @@ -719,7 +719,7 @@ build_arg_context(pcmk__common_args_t *args) { int main(int argc, char **argv) { - pe_working_set_t *data_set = NULL; + pcmk_scheduler_t *scheduler = NULL; xmlNode *cib_xml_copy = NULL; cib_t *cib_conn = NULL; @@ -751,13 +751,16 @@ main(int argc, char **argv) pcmk__cli_help('v'); } - data_set = pe_new_working_set(); - if (data_set == NULL) { - crm_perror(LOG_CRIT, "Could not allocate working set"); - exit_code = CRM_EX_OSERR; + scheduler = pe_new_working_set(); + if (scheduler == NULL) { + rc = errno; + exit_code = pcmk_rc2exitc(rc); + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "Could not allocate scheduler data: %s", pcmk_rc_str(rc)); goto done; } - pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); + pe__set_working_set_flags(scheduler, + pcmk_sched_no_counts|pcmk_sched_no_compat); cib_conn = cib_new(); if (cib_conn == NULL) { @@ -798,14 +801,14 @@ main(int argc, char **argv) goto done; } - data_set->input = cib_xml_copy; - data_set->now = crm_time_new(NULL); + scheduler->input = cib_xml_copy; + scheduler->now = crm_time_new(NULL); - cluster_status(data_set); + cluster_status(scheduler); /* For recording the tickets that are referenced in rsc_ticket constraints * but have never been granted yet. */ - pcmk__unpack_constraints(data_set); + pcmk__unpack_constraints(scheduler); if (options.ticket_cmd == 'l' || options.ticket_cmd == 'L' || options.ticket_cmd == 'w') { bool raw = false; @@ -818,7 +821,7 @@ main(int argc, char **argv) } if (options.ticket_id) { - pe_ticket_t *ticket = find_ticket(options.ticket_id, data_set); + pcmk_ticket_t *ticket = find_ticket(options.ticket_id, scheduler); if (ticket == NULL) { exit_code = CRM_EX_NOSUCH; @@ -829,7 +832,7 @@ main(int argc, char **argv) print_ticket(ticket, raw, details); } else { - print_ticket_list(data_set, raw, details); + print_ticket_list(scheduler, raw, details); } } else if (options.ticket_cmd == 'q') { @@ -860,7 +863,8 @@ main(int argc, char **argv) goto done; } - rc = get_ticket_state_attr(options.ticket_id, options.get_attr_name, &value, data_set); + rc = get_ticket_state_attr(options.ticket_id, options.get_attr_name, + &value, scheduler); if (rc == pcmk_rc_ok) { fprintf(stdout, "%s\n", value); } else if (rc == ENXIO && options.attr_default) { @@ -878,9 +882,9 @@ main(int argc, char **argv) } if (options.force == FALSE) { - pe_ticket_t *ticket = NULL; + pcmk_ticket_t *ticket = NULL; - ticket = find_ticket(options.ticket_id, data_set); + ticket = find_ticket(options.ticket_id, scheduler); if (ticket == NULL) { exit_code = CRM_EX_NOSUCH; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, @@ -934,7 +938,7 @@ main(int argc, char **argv) goto done; } - rc = modify_ticket_state(options.ticket_id, cib_conn, data_set); + rc = modify_ticket_state(options.ticket_id, cib_conn, scheduler); exit_code = pcmk_rc2exitc(rc); if (rc != pcmk_rc_ok) { @@ -985,8 +989,8 @@ main(int argc, char **argv) } attr_delete = NULL; - pe_free_working_set(data_set); - data_set = NULL; + pe_free_working_set(scheduler); + scheduler = NULL; cib__clean_up_connection(&cib_conn); diff --git a/tools/crm_verify.c b/tools/crm_verify.c index 43b09da..199814e 100644 --- a/tools/crm_verify.c +++ b/tools/crm_verify.c @@ -85,10 +85,23 @@ 
diff --git a/tools/crm_verify.c b/tools/crm_verify.c
index 43b09da..199814e 100644
--- a/tools/crm_verify.c
+++ b/tools/crm_verify.c
@@ -85,10 +85,23 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
         "Check the consistency of the configuration in the running cluster:\n\n"
         "\tcrm_verify --live-check\n\n"
         "Check the consistency of the configuration in a given file and "
+        "produce quiet output:\n\n"
+        "\tcrm_verify --xml-file file.xml --quiet\n\n"
+        "Check the consistency of the configuration in a given file and "
         "produce verbose output:\n\n"
         "\tcrm_verify --xml-file file.xml --verbose\n\n";

+    GOptionEntry extra_prog_entries[] = {
+        { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet),
+          "Don't print verify information",
+          NULL },
+        { NULL }
+    };
+
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
+
+    pcmk__add_main_args(context, extra_prog_entries);
+
     g_option_context_set_description(context, description);

     pcmk__add_arg_group(context, "data", "Data sources:",
@@ -105,8 +118,7 @@ main(int argc, char **argv)
     xmlNode *cib_object = NULL;
     xmlNode *status = NULL;

-    pe_working_set_t *data_set = NULL;
-    const char *xml_tag = NULL;
+    pcmk_scheduler_t *scheduler = NULL;

     int rc = pcmk_rc_ok;
     crm_exit_t exit_code = CRM_EX_OK;
@@ -126,6 +138,10 @@ main(int argc, char **argv)
         goto done;
     }

+    if (args->verbosity > 0) {
+        args->verbosity -= args->quiet;
+    }
+
     pcmk__cli_init_logging("crm_verify", args->verbosity);

     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
@@ -143,6 +159,9 @@ main(int argc, char **argv)
     pcmk__register_lib_messages(out);

+    pcmk__set_config_error_handler((pcmk__config_error_func) out->err, out);
+    pcmk__set_config_warning_handler((pcmk__config_warning_func) out->err, out);
+
     crm_info("=#=#=#=#= Getting XML =#=#=#=#=");

     if (options.use_live_cib) {
@@ -184,8 +203,7 @@ main(int argc, char **argv)
         goto done;
     }

-    xml_tag = crm_element_name(cib_object);
-    if (!pcmk__str_eq(xml_tag, XML_TAG_CIB, pcmk__str_casei)) {
+    if (!pcmk__xe_is(cib_object, XML_TAG_CIB)) {
         rc = EBADMSG;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "This tool can only check complete configurations (i.e. those starting with <cib>).");
@@ -201,7 +219,7 @@ main(int argc, char **argv)
         create_xml_node(cib_object, XML_CIB_TAG_STATUS);
     }

-    if (validate_xml(cib_object, NULL, FALSE) == FALSE) {
+    if (pcmk__validate_xml(cib_object, NULL, (xmlRelaxNGValidityErrorFunc) out->err, out) == FALSE) {
         pcmk__config_err("CIB did not pass schema validation");
         free_xml(cib_object);
         cib_object = NULL;
@@ -215,13 +233,14 @@ main(int argc, char **argv)
                  xml_latest_schema());
     }

-    data_set = pe_new_working_set();
-    if (data_set == NULL) {
+    scheduler = pe_new_working_set();
+    if (scheduler == NULL) {
         rc = errno;
-        crm_perror(LOG_CRIT, "Unable to allocate working set");
+        g_set_error(&error, PCMK__RC_ERROR, rc,
+                    "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
         goto done;
     }
-    data_set->priv = out;
+    scheduler->priv = out;

     /* Process the configuration to set crm_config_error/crm_config_warning.
      *
@@ -229,31 +248,31 @@ main(int argc, char **argv)
      * example, action configuration), so we aren't necessarily checking those.
      */
     if (cib_object != NULL) {
-        unsigned long long flags = pe_flag_no_counts|pe_flag_no_compat;
+        unsigned long long flags = pcmk_sched_no_counts|pcmk_sched_no_compat;

         if ((status == NULL) && !options.use_live_cib) {
             // No status available, so do minimal checks
-            flags |= pe_flag_check_config;
+            flags |= pcmk_sched_validate_only;
         }

-        pcmk__schedule_actions(cib_object, flags, data_set);
+        pcmk__schedule_actions(cib_object, flags, scheduler);
     }

-    pe_free_working_set(data_set);
+    pe_free_working_set(scheduler);

     if (crm_config_error) {
         rc = pcmk_rc_schema_validation;

-        if (args->verbosity > 0) {
+        if (args->verbosity > 0 || pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Errors found during check: config not valid");
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Errors found during check: config not valid\n-V may provide more details");
-        }
+        }

     } else if (crm_config_warning) {
         rc = pcmk_rc_schema_validation;

-        if (args->verbosity > 0) {
+        if (args->verbosity > 0 || pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Warnings found during check: config may not be valid");
         } else {
@@ -273,7 +292,7 @@ main(int argc, char **argv)
         exit_code = pcmk_rc2exitc(rc);
     }

-    pcmk__output_and_clear_error(&error, NULL);
+    pcmk__output_and_clear_error(&error, out);

     if (out != NULL) {
         out->finish(out, exit_code, true, NULL);
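The crm_verify.c change above adds a tool-specific --quiet flag by declaring an extra GOptionEntry array, registering it with pcmk__add_main_args(), and subtracting args->quiet from the verbosity before logging is initialised. The snippet below shows the same kind of boolean flag wired up with plain GLib option parsing; it is a standalone sketch for illustration only and deliberately avoids Pacemaker's pcmk__build_arg_context()/pcmk__add_main_args() wrappers that the real tool uses.

    /* Standalone sketch: a boolean --quiet flag with plain GLib. */
    #include <glib.h>
    #include <stdio.h>

    int
    main(int argc, char **argv)
    {
        gboolean quiet = FALSE;
        GError *error = NULL;
        GOptionEntry entries[] = {
            { "quiet", 'q', 0, G_OPTION_ARG_NONE, &quiet,
              "Don't print verify information", NULL },
            { NULL }
        };
        GOptionContext *context = g_option_context_new(NULL);

        g_option_context_add_main_entries(context, entries, NULL);
        if (!g_option_context_parse(context, &argc, &argv, &error)) {
            fprintf(stderr, "%s\n", error->message);
            g_clear_error(&error);
            g_option_context_free(context);
            return 1;
        }
        if (!quiet) {
            printf("verify details would be printed here\n");
        }
        g_option_context_free(context);
        return 0;
    }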
diff --git a/tools/stonith_admin.c b/tools/stonith_admin.c
index 1077de7..01f72d5 100644
--- a/tools/stonith_admin.c
+++ b/tools/stonith_admin.c
@@ -344,7 +344,11 @@ request_fencing(stonith_t *st, const char *target, const char *command,
     if (rc != pcmk_rc_ok) {
         const char *rc_str = pcmk_rc_str(rc);
-        const char *what = (strcmp(command, "on") == 0)? "unfence" : "fence";
+        const char *what = "fence";
+
+        if (strcmp(command, PCMK_ACTION_ON) == 0) {
+            what = "unfence";
+        }

         // If reason is identical to return code string, don't display it twice
         if (pcmk__str_eq(rc_str, reason, pcmk__str_none)) {
@@ -542,7 +546,7 @@ main(int argc, char **argv)
         case 'I':
             rc = pcmk__fence_installed(out, st, options.timeout*1000);
             if (rc != pcmk_rc_ok) {
-                out->err(out, "Failed to list installed devices: %s", pcmk_strerror(rc));
+                out->err(out, "Failed to list installed devices: %s", pcmk_rc_str(rc));
             }
             break;

@@ -550,7 +554,7 @@ main(int argc, char **argv)
         case 'L':
             rc = pcmk__fence_registered(out, st, target, options.timeout*1000);
             if (rc != pcmk_rc_ok) {
-                out->err(out, "Failed to list registered devices: %s", pcmk_strerror(rc));
+                out->err(out, "Failed to list registered devices: %s", pcmk_rc_str(rc));
             }
             break;

@@ -566,7 +570,7 @@ main(int argc, char **argv)
         case 's':
             rc = pcmk__fence_list_targets(out, st, device, options.timeout*1000);
             if (rc != pcmk_rc_ok) {
-                out->err(out, "Couldn't list targets: %s", pcmk_strerror(rc));
+                out->err(out, "Couldn't list targets: %s", pcmk_rc_str(rc));
             }
             break;

@@ -621,15 +625,15 @@ main(int argc, char **argv)
             break;

         case 'B':
-            rc = request_fencing(st, target, "reboot", &error);
+            rc = request_fencing(st, target, PCMK_ACTION_REBOOT, &error);
             break;

         case 'F':
-            rc = request_fencing(st, target, "off", &error);
+            rc = request_fencing(st, target, PCMK_ACTION_OFF, &error);
             break;

         case 'U':
-            rc = request_fencing(st, target, "on", &error);
+            rc = request_fencing(st, target, PCMK_ACTION_ON, &error);
            break;

         case 'h':
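In the stonith_admin.c hunks, the fence helpers' results are now formatted with pcmk_rc_str() rather than pcmk_strerror(), and the literal action strings ("on", "off", "reboot") are replaced by the PCMK_ACTION_* constants. The sketch below only illustrates the two return-code families involved; it assumes the public <crm/common/results.h> API (pcmk_rc_str(), pcmk_strerror(), pcmk_rc2legacy()) and a hypothetical caller, and is not part of the patch.

    /* Illustrative sketch: formatting the two Pacemaker return-code families.
     * Standard codes (pcmk_rc_ok, errno-style values) are formatted with
     * pcmk_rc_str(); legacy codes are formatted with pcmk_strerror().  The
     * helpers are assumed from the public <crm/common/results.h> header. */
    #include <stdio.h>
    #include <crm/common/results.h>

    static void
    report_fence_result(int rc)  /* rc: standard code, as the fence helpers return */
    {
        if (rc == pcmk_rc_ok) {
            printf("ok\n");
            return;
        }
        printf("failed: %s (legacy code %d: %s)\n",
               pcmk_rc_str(rc), pcmk_rc2legacy(rc),
               pcmk_strerror(pcmk_rc2legacy(rc)));
    }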